Dataset schema (per-column type and value range):

  code: string (lengths 86 to 54.5k)
  code_codestyle: int64 (0 to 371)
  style_context: string (lengths 87 to 49.2k)
  style_context_codestyle: int64 (0 to 349)
  label: int64 (0 to 1)
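For orientation, here is a minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library. The repository path below is a placeholder assumption; only the column names and ranges come from the schema above, and reading the codestyle columns as style ids is likewise an interpretation, not something the dump states.

    from datasets import load_dataset

    # Hypothetical dataset path; substitute the real repository id.
    ds = load_dataset("user/code-style-pairs", split="train")

    row = ds[0]
    # Scalar columns: presumed style ids for each snippet plus the binary label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    # String columns hold entire source files; peek at the first characters.
    print(row["code"][:120])
    print(row["style_context"][:120])

    # Sanity check: reproduce the length range reported in the schema for `code`.
    lengths = [len(c) for c in ds["code"]]
    print(min(lengths), max(lengths))  # expected 86 and ~54.5k per the schema

The rows below follow the column order of the schema: a `code` string, its `code_codestyle` value, a `style_context` string, its `style_context_codestyle` value, then the `label`.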
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=2 , ): '''simple docstring''' _lowerCAmelCase : List[Any] = parent _lowerCAmelCase : List[Any] = batch_size _lowerCAmelCase : Optional[Any] = image_size _lowerCAmelCase : List[Any] = patch_size _lowerCAmelCase : Dict = num_channels _lowerCAmelCase : int = is_training _lowerCAmelCase : str = use_labels _lowerCAmelCase : List[str] = hidden_size _lowerCAmelCase : Tuple = num_hidden_layers _lowerCAmelCase : List[str] = num_attention_heads _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Optional[int] = hidden_act _lowerCAmelCase : str = hidden_dropout_prob _lowerCAmelCase : List[Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = type_sequence_label_size _lowerCAmelCase : List[Any] = initializer_range _lowerCAmelCase : List[Any] = scope _lowerCAmelCase : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _lowerCAmelCase : Union[str, Any] = (image_size // patch_size) ** 2 _lowerCAmelCase : Optional[int] = num_patches + 2 def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : List[str] = None if self.use_labels: _lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : Tuple = self.get_config() return config, pixel_values, labels def a ( self ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = 
DeiTModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Any = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = DeiTForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Optional[int] = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCAmelCase : Any = 1 _lowerCAmelCase : Optional[int] = DeiTForMaskedImageModeling(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.type_sequence_label_size _lowerCAmelCase : List[str] = DeiTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase : str = 1 _lowerCAmelCase : Any = DeiTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[int] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : List[str] = config_and_inputs _lowerCAmelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = DeiTModelTester(self ) _lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Union[str, Any] = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def a ( self ): '''simple 
docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = model_class(snake_case__ ) _lowerCAmelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : Tuple = [*signature.parameters.keys()] _lowerCAmelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' _lowerCAmelCase : int = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def a ( self ): '''simple docstring''' if not self.model_tester.is_training: return _lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : List[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(snake_case__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue _lowerCAmelCase : Tuple = model_class(snake_case__ ) model.to(snake_case__ ) model.train() _lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : str = model(**snake_case__ ).loss loss.backward() def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _lowerCAmelCase : str = False _lowerCAmelCase : str = True for model_class in self.all_model_classes: if model_class in get_values(snake_case__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue _lowerCAmelCase : Tuple = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() _lowerCAmelCase : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : Optional[int] = model(**snake_case__ ).loss loss.backward() def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : int = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(snake_case__ ), *get_values(snake_case__ ), ] or model_class.__name__ == 
"DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ): _lowerCAmelCase : Optional[Any] = problem_type['title'] _lowerCAmelCase : List[str] = problem_type['num_labels'] _lowerCAmelCase : Union[str, Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() _lowerCAmelCase : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if problem_type["num_labels"] > 1: _lowerCAmelCase : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) _lowerCAmelCase : Union[str, Any] = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=snake_case__ ) as warning_list: _lowerCAmelCase : Any = model(**snake_case__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def a ( self ): '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : str = DeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( snake_case__ ) _lowerCAmelCase : int = self.default_image_processor _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : List[Any] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : int = model(**snake_case__ ) # verify the logits _lowerCAmelCase : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) _lowerCAmelCase : Any = self.default_image_processor _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Tuple = inputs.pixel_values.to(snake_case__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _lowerCAmelCase : Union[str, Any] = model(snake_case__ )
code_codestyle: 25
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
style_context_codestyle: 25
label: 1

'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = LDMTextToImagePipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } __magic_name__ = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = False def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _lowerCAmelCase : Dict = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) _lowerCAmelCase : Dict = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _lowerCAmelCase : Dict = CLIPTextModel(snake_case__ ) _lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _lowerCAmelCase : Optional[int] = { 'unet': unet, 'scheduler': scheduler, 'vqvae': vae, 'bert': text_encoder, 'tokenizer': tokenizer, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Any = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase : Any = self.get_dummy_components() _lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Dict = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : int = pipe(**snake_case__ ).images _lowerCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _lowerCAmelCase : Any = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class 
UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ ) _lowerCAmelCase : Dict = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) ) _lowerCAmelCase : List[str] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ) _lowerCAmelCase : str = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : str = self.get_inputs(snake_case__ ) _lowerCAmelCase : str = pipe(**snake_case__ ).images _lowerCAmelCase : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) _lowerCAmelCase : List[str] = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) _lowerCAmelCase : int = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = torch.manual_seed(snake_case__ ) _lowerCAmelCase : Union[str, Any] = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) ) _lowerCAmelCase : List[str] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ) _lowerCAmelCase : Optional[int] = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Any = self.get_inputs(snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images[0] _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' ) _lowerCAmelCase : Optional[int] = np.abs(expected_image - image ).max() assert max_diff < 1E-3
code_codestyle: 25
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "trajectory_transformer" __magic_name__ = ["past_key_values"] __magic_name__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = action_weight _lowerCAmelCase : Optional[int] = reward_weight _lowerCAmelCase : Union[str, Any] = value_weight _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Tuple = block_size _lowerCAmelCase : List[Any] = action_dim _lowerCAmelCase : List[Any] = observation_dim _lowerCAmelCase : Union[str, Any] = transition_dim _lowerCAmelCase : Tuple = learning_rate _lowerCAmelCase : int = n_layer _lowerCAmelCase : Any = n_head _lowerCAmelCase : Tuple = n_embd _lowerCAmelCase : Optional[Any] = embd_pdrop _lowerCAmelCase : Union[str, Any] = attn_pdrop _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range _lowerCAmelCase : List[Any] = use_cache super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
style_context_codestyle: 25
label: 1

'''simple docstring''' from __future__ import annotations import queue class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = data _lowerCAmelCase : str = None _lowerCAmelCase : Union[str, Any] = None def lowercase (): """simple docstring""" print('\n********Press N to stop entering at any point of time********\n' ) _lowerCAmelCase : Union[str, Any] = input('Enter the value of the root node: ' ).strip().lower() _lowerCAmelCase : queue.Queue = queue.Queue() _lowerCAmelCase : Optional[int] = TreeNode(int(_A ) ) q.put(_A ) while not q.empty(): _lowerCAmelCase : str = q.get() _lowerCAmelCase : Any = f'Enter the left node of {node_found.data}: ' _lowerCAmelCase : Optional[Any] = input(_A ).strip().lower() or 'n' if check == "n": return tree_node _lowerCAmelCase : Optional[int] = TreeNode(int(_A ) ) _lowerCAmelCase : Optional[int] = left_node q.put(_A ) _lowerCAmelCase : List[Any] = f'Enter the right node of {node_found.data}: ' _lowerCAmelCase : int = input(_A ).strip().lower() or 'n' if check == "n": return tree_node _lowerCAmelCase : int = TreeNode(int(_A ) ) _lowerCAmelCase : Union[str, Any] = right_node q.put(_A ) raise def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return print(node.data , end=',' ) pre_order(node.left ) pre_order(node.right ) def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return in_order(node.left ) print(node.data , end=',' ) in_order(node.right ) def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=',' ) def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return _lowerCAmelCase : queue.Queue = queue.Queue() q.put(_A ) while not q.empty(): _lowerCAmelCase : str = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return _lowerCAmelCase : queue.Queue = queue.Queue() q.put(_A ) while not q.empty(): _lowerCAmelCase : Dict = [] while not q.empty(): _lowerCAmelCase : List[Any] = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(_A ) def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return _lowerCAmelCase : list[TreeNode] = [] _lowerCAmelCase : int = node while n or stack: while n: # start from root node, find its left child print(n.data , end=',' ) stack.append(_A ) _lowerCAmelCase : Union[str, Any] = n.left # end of while means current node doesn't have left child _lowerCAmelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCAmelCase : List[Any] = n.right def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return _lowerCAmelCase : list[TreeNode] = [] _lowerCAmelCase : Union[str, Any] = node while n or stack: while n: stack.append(_A ) _lowerCAmelCase : Optional[int] = n.left _lowerCAmelCase : str = stack.pop() print(n.data , end=',' ) _lowerCAmelCase : Union[str, Any] = n.right def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ) or not node: return _lowerCAmelCase , _lowerCAmelCase : str = [], [] _lowerCAmelCase : List[Any] = node stacka.append(_A ) while 
stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase : List[Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(_A ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=',' ) def lowercase (_A = "" , _A=5_0 , _A="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase : str = divmod(width - len(_A ) - 2 , 2 ) return f'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) lowerCAmelCase : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
code_codestyle: 25
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
style_context_codestyle: 25
label: 1

'''simple docstring''' from __future__ import annotations from random import random from typing import Generic, TypeVar lowerCAmelCase : List[Any] = TypeVar("""KT""") lowerCAmelCase : Optional[int] = TypeVar("""VT""") class UpperCamelCase__ ( Generic[KT, VT] ): """simple docstring""" def __init__( self , snake_case__ = "root" , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : List[Any] = key _lowerCAmelCase : Tuple = value _lowerCAmelCase : list[Node[KT, VT]] = [] def __repr__( self ): '''simple docstring''' return F'Node({self.key}: {self.value})' @property def a ( self ): '''simple docstring''' return len(self.forward ) class UpperCamelCase__ ( Generic[KT, VT] ): """simple docstring""" def __init__( self , snake_case__ = 0.5 , snake_case__ = 16 ): '''simple docstring''' _lowerCAmelCase : Node[KT, VT] = Node[KT, VT]() _lowerCAmelCase : List[str] = 0 _lowerCAmelCase : List[str] = p _lowerCAmelCase : str = max_level def __str__( self ): '''simple docstring''' _lowerCAmelCase : int = list(self ) if len(snake_case__ ) == 0: return F'SkipList(level={self.level})' _lowerCAmelCase : Any = max((len(str(snake_case__ ) ) for item in items) , default=4 ) _lowerCAmelCase : List[str] = max(snake_case__ , 4 ) + 4 _lowerCAmelCase : Union[str, Any] = self.head _lowerCAmelCase : int = [] _lowerCAmelCase : Any = node.forward.copy() lines.append(F'[{node.key}]'.ljust(snake_case__ , '-' ) + '* ' * len(snake_case__ ) ) lines.append(' ' * label_size + '| ' * len(snake_case__ ) ) while len(node.forward ) != 0: _lowerCAmelCase : Union[str, Any] = node.forward[0] lines.append( F'[{node.key}]'.ljust(snake_case__ , '-' ) + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) ) lines.append(' ' * label_size + '| ' * len(snake_case__ ) ) _lowerCAmelCase : Union[str, Any] = node.forward lines.append('None'.ljust(snake_case__ ) + '* ' * len(snake_case__ ) ) return F'SkipList(level={self.level})\n' + "\n".join(snake_case__ ) def __iter__( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.head while len(node.forward ) != 0: yield node.forward[0].key _lowerCAmelCase : List[Any] = node.forward[0] def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = 1 while random() < self.p and level < self.max_level: level += 1 return level def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = [] _lowerCAmelCase : Optional[int] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: _lowerCAmelCase : Any = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(snake_case__ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. 
if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self._locate_node(snake_case__ ) if node is not None: for i, update_node in enumerate(snake_case__ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: _lowerCAmelCase : Optional[int] = node.forward[i] else: _lowerCAmelCase : int = update_node.forward[:i] def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : int = self._locate_node(snake_case__ ) if node is not None: _lowerCAmelCase : Any = value else: _lowerCAmelCase : Dict = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , snake_case__ ): update_vector.append(self.head ) _lowerCAmelCase : Optional[int] = level _lowerCAmelCase : List[str] = Node(snake_case__ , snake_case__ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(snake_case__ ) else: _lowerCAmelCase : Union[str, Any] = new_node def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : str = self._locate_node(snake_case__ ) if node is not None: return node.value return None def lowercase (): """simple docstring""" _lowerCAmelCase : Any = SkipList() skip_list.insert('Key1' , 3 ) skip_list.insert('Key2' , 1_2 ) skip_list.insert('Key3' , 4_1 ) skip_list.insert('Key4' , -1_9 ) _lowerCAmelCase : Optional[int] = skip_list.head _lowerCAmelCase : Any = {} while node.level != 0: _lowerCAmelCase : List[str] = node.forward[0] _lowerCAmelCase : int = node.value assert len(_A ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 1_2 assert all_values["Key3"] == 4_1 assert all_values["Key4"] == -1_9 def lowercase (): """simple docstring""" _lowerCAmelCase : Any = SkipList() skip_list.insert('Key1' , 1_0 ) skip_list.insert('Key1' , 1_2 ) skip_list.insert('Key5' , 7 ) skip_list.insert('Key7' , 1_0 ) skip_list.insert('Key10' , 5 ) skip_list.insert('Key7' , 7 ) skip_list.insert('Key5' , 5 ) skip_list.insert('Key10' , 1_0 ) _lowerCAmelCase : Union[str, Any] = skip_list.head _lowerCAmelCase : Optional[Any] = {} while node.level != 0: _lowerCAmelCase : List[Any] = node.forward[0] _lowerCAmelCase : Any = node.value if len(_A ) != 4: print() assert len(_A ) == 4 assert all_values["Key1"] == 1_2 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 1_0 def lowercase (): """simple docstring""" _lowerCAmelCase : List[Any] = SkipList() assert skip_list.find('Some key' ) is None def lowercase (): """simple docstring""" _lowerCAmelCase : Tuple = SkipList() skip_list.insert('Key2' , 2_0 ) assert skip_list.find('Key2' ) == 2_0 skip_list.insert('Some Key' , 1_0 ) skip_list.insert('Key2' , 8 ) skip_list.insert('V' , 1_3 ) assert skip_list.find('Y' ) is None assert skip_list.find('Key2' ) == 8 assert skip_list.find('Some Key' ) == 1_0 assert skip_list.find('V' ) == 1_3 def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[Any] = SkipList() skip_list.delete('Some key' ) assert len(skip_list.head.forward ) == 0 def lowercase (): """simple docstring""" _lowerCAmelCase : Union[str, Any] 
= SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('V' ) skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('Key2' ) is None def lowercase (): """simple docstring""" _lowerCAmelCase : List[str] = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('V' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) == 1_4 assert skip_list.find('Key1' ) == 1_2 assert skip_list.find('Key2' ) == 1_5 skip_list.delete('X' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) == 1_2 assert skip_list.find('Key2' ) == 1_5 skip_list.delete('Key1' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) == 1_5 skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) is None def lowercase (): """simple docstring""" _lowerCAmelCase : List[str] = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4_2 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('X' ) def traverse_keys(_A ): yield node.key for forward_node in node.forward: yield from traverse_keys(_A ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowercase (): """simple docstring""" def is_sorted(_A ): return all(next_item >= item for item, next_item in zip(_A , lst[1:] ) ) _lowerCAmelCase : Optional[Any] = SkipList() for i in range(1_0 ): skip_list.insert(_A , _A ) assert is_sorted(list(_A ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_A ) ) skip_list.insert(-1_2 , -1_2 ) skip_list.insert(7_7 , 7_7 ) assert is_sorted(list(_A ) ) def lowercase (): """simple docstring""" for _ in range(1_0_0 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowercase (): """simple docstring""" _lowerCAmelCase : Any = SkipList() skip_list.insert(2 , '2' ) skip_list.insert(4 , '4' ) skip_list.insert(6 , '4' ) skip_list.insert(4 , '5' ) skip_list.insert(8 , '4' ) skip_list.insert(9 , '4' ) skip_list.delete(4 ) print(_A ) if __name__ == "__main__": import doctest doctest.testmod() main()
code_codestyle: 25
'''simple docstring''' from math import isqrt def lowercase (_A ): """simple docstring""" return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) ) def lowercase (_A = 1_0**6 ): """simple docstring""" _lowerCAmelCase : str = 0 _lowerCAmelCase : str = 1 _lowerCAmelCase : List[str] = 7 while prime_candidate < max_prime: primes_count += is_prime(_A ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 25
label: 1
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
25
'''MVP model configuration'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}


class MvpConfig(PretrainedConfig):
    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function='gelu',
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
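# Usage sketch (an illustrative addition, not part of the original module); assumes
# `transformers` is installed so MvpConfig is importable from the library namespace.
if __name__ == '__main__':
    from transformers import MvpConfig

    config = MvpConfig(encoder_layers=6, decoder_layers=6)
    print(config.hidden_size)          # 1024 -- attribute_map routes hidden_size to d_model
    print(config.num_attention_heads)  # 16 -- routed to encoder_attention_heads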
25
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = tempfile.mkdtemp() _lowerCAmelCase : Optional[Any] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] _lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) _lowerCAmelCase : List[str] = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], 'do_convert_rgb': True, } _lowerCAmelCase : Dict = os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case__ , snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _lowerCAmelCase : Dict = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.get_tokenizer() _lowerCAmelCase : Any = self.get_rust_tokenizer() _lowerCAmelCase : Optional[int] = self.get_image_processor() _lowerCAmelCase : str = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_slow.save_pretrained(self.tmpdirname ) _lowerCAmelCase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ ) _lowerCAmelCase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_fast.save_pretrained(self.tmpdirname ) _lowerCAmelCase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case__ ) self.assertIsInstance(processor_fast.tokenizer , snake_case__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , snake_case__ ) self.assertIsInstance(processor_fast.image_processor , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase : Dict = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) _lowerCAmelCase : int = self.get_image_processor(do_normalize=snake_case__ ) _lowerCAmelCase : Union[str, Any] = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case__ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.get_image_processor() _lowerCAmelCase : Optional[Any] = self.get_tokenizer() _lowerCAmelCase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : str = self.prepare_image_inputs() _lowerCAmelCase : Dict = image_processor(snake_case__ , return_tensors='np' ) _lowerCAmelCase : Tuple = processor(images=snake_case__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.get_image_processor() _lowerCAmelCase : List[Any] = self.get_tokenizer() _lowerCAmelCase : Any = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : List[Any] = 'Alexandra,T-shirt的价格是15便士。' _lowerCAmelCase : List[Any] = processor(text=snake_case__ ) _lowerCAmelCase : List[Any] = tokenizer(snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.get_image_processor() _lowerCAmelCase : Tuple = self.get_tokenizer() _lowerCAmelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : int = 'Alexandra,T-shirt的价格是15便士。' _lowerCAmelCase : List[str] = self.prepare_image_inputs() _lowerCAmelCase : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.get_image_processor() _lowerCAmelCase : Optional[Any] = self.get_tokenizer() _lowerCAmelCase : int = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase : str = processor.batch_decode(snake_case__ ) _lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.get_image_processor() _lowerCAmelCase : Optional[Any] = self.get_tokenizer() _lowerCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : str = 
'Alexandra,T-shirt的价格是15便士。' _lowerCAmelCase : Tuple = self.prepare_image_inputs() _lowerCAmelCase : List[Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
25
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
25
1
'''Dummy objects raised in place of schedulers that need `torch` and `torchsde`.'''
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ['torch', 'torchsde']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'torchsde'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])
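# Illustrative sketch (an addition, not part of the original file): how a
# DummyObject-style placeholder fails fast when optional backends are missing.
# Local stand-ins are defined below instead of the real diffusers helpers, so
# nothing above is shadowed; names prefixed with _ are hypothetical.
if __name__ == '__main__':
    class _DummyMeta(type):
        def __getattr__(cls, name):
            # Any class-level access (e.g. Example.from_config) raises immediately.
            raise ImportError(f'{cls.__name__} requires backends: {cls._backends}')

    class _Example(metaclass=_DummyMeta):
        _backends = ['torch', 'torchsde']

        def __init__(self, *args, **kwargs):
            raise ImportError(f'{type(self).__name__} requires backends: {self._backends}')

    try:
        _Example()  # fails fast instead of constructing a broken scheduler
    except ImportError as err:
        print(err)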
25
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = 1 __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None def a ( self ): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
25
1
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.0 , snake_case__ = None , snake_case__ = "geglu" , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = "layer_norm" , snake_case__ = False , ): '''simple docstring''' super().__init__() _lowerCAmelCase : Optional[int] = only_cross_attention _lowerCAmelCase : Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero' _lowerCAmelCase : List[str] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to' F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: _lowerCAmelCase : List[Any] = AdaLayerNorm(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: _lowerCAmelCase : List[Any] = AdaLayerNormZero(snake_case__ , snake_case__ ) else: _lowerCAmelCase : Optional[int] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) _lowerCAmelCase : Optional[Any] = Attention( query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. _lowerCAmelCase : Optional[Any] = ( AdaLayerNorm(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) ) _lowerCAmelCase : List[Any] = Attention( query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none else: _lowerCAmelCase : Union[str, Any] = None _lowerCAmelCase : int = None # 3. 
Feed-forward _lowerCAmelCase : Optional[int] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) _lowerCAmelCase : Union[str, Any] = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ ) # let chunk size default to None _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Union[str, Any] = 0 def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = chunk_size _lowerCAmelCase : Tuple = dim def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , ): '''simple docstring''' if self.use_ada_layer_norm: _lowerCAmelCase : Union[str, Any] = self.norma(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = self.norma( snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype ) else: _lowerCAmelCase : Union[str, Any] = self.norma(snake_case__ ) _lowerCAmelCase : str = cross_attention_kwargs if cross_attention_kwargs is not None else {} _lowerCAmelCase : Union[str, Any] = self.attna( snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : int = gate_msa.unsqueeze(1 ) * attn_output _lowerCAmelCase : str = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: _lowerCAmelCase : List[str] = ( self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ ) ) _lowerCAmelCase : Optional[int] = self.attna( snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Optional[int] = attn_output + hidden_states # 3. Feed-forward _lowerCAmelCase : Optional[Any] = self.norma(snake_case__ ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' 
) _lowerCAmelCase : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size _lowerCAmelCase : str = torch.cat( [self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: _lowerCAmelCase : Tuple = self.ff(snake_case__ ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : int = gate_mlp.unsqueeze(1 ) * ff_output _lowerCAmelCase : Dict = ff_output + hidden_states return hidden_states class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = 4 , snake_case__ = 0.0 , snake_case__ = "geglu" , snake_case__ = False , ): '''simple docstring''' super().__init__() _lowerCAmelCase : Optional[Any] = int(dim * mult ) _lowerCAmelCase : Optional[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": _lowerCAmelCase : Tuple = GELU(snake_case__ , snake_case__ ) if activation_fn == "gelu-approximate": _lowerCAmelCase : Any = GELU(snake_case__ , snake_case__ , approximate='tanh' ) elif activation_fn == "geglu": _lowerCAmelCase : Tuple = GEGLU(snake_case__ , snake_case__ ) elif activation_fn == "geglu-approximate": _lowerCAmelCase : Any = ApproximateGELU(snake_case__ , snake_case__ ) _lowerCAmelCase : Union[str, Any] = nn.ModuleList([] ) # project in self.net.append(snake_case__ ) # project dropout self.net.append(nn.Dropout(snake_case__ ) ) # project out self.net.append(nn.Linear(snake_case__ , snake_case__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(snake_case__ ) ) def a ( self , snake_case__ ): '''simple docstring''' for module in self.net: _lowerCAmelCase : List[Any] = module(snake_case__ ) return hidden_states class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ = "none" ): '''simple docstring''' super().__init__() _lowerCAmelCase : str = nn.Linear(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = approximate def a ( self , snake_case__ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(snake_case__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.proj(snake_case__ ) _lowerCAmelCase : str = self.gelu(snake_case__ ) return hidden_states class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : Tuple = nn.Linear(snake_case__ , dim_out * 2 ) def a ( self , snake_case__ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(snake_case__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Dict = self.proj(snake_case__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(snake_case__ ) class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : List[str] = nn.Linear(snake_case__ , snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.proj(snake_case__ ) return x * torch.sigmoid(1.702 * x ) class 
UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : Optional[Any] = nn.Embedding(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = nn.SiLU() _lowerCAmelCase : Optional[int] = nn.Linear(snake_case__ , embedding_dim * 2 ) _lowerCAmelCase : Optional[int] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.linear(self.silu(self.emb(snake_case__ ) ) ) _lowerCAmelCase , _lowerCAmelCase : List[Any] = torch.chunk(snake_case__ , 2 ) _lowerCAmelCase : Optional[Any] = self.norm(snake_case__ ) * (1 + scale) + shift return x class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : Union[str, Any] = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ ) _lowerCAmelCase : Dict = nn.SiLU() _lowerCAmelCase : Tuple = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ ) _lowerCAmelCase : Optional[Any] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1E-6 ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : List[str] = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = emb.chunk(6 , dim=1 ) _lowerCAmelCase : Any = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = 1E-5 ): '''simple docstring''' super().__init__() _lowerCAmelCase : Dict = num_groups _lowerCAmelCase : Dict = eps if act_fn is None: _lowerCAmelCase : Union[str, Any] = None else: _lowerCAmelCase : List[Any] = get_activation(snake_case__ ) _lowerCAmelCase : int = nn.Linear(snake_case__ , out_dim * 2 ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if self.act: _lowerCAmelCase : List[Any] = self.act(snake_case__ ) _lowerCAmelCase : Optional[int] = self.linear(snake_case__ ) _lowerCAmelCase : str = emb[:, :, None, None] _lowerCAmelCase , _lowerCAmelCase : Any = emb.chunk(2 , dim=1 ) _lowerCAmelCase : int = F.group_norm(snake_case__ , self.num_groups , eps=self.eps ) _lowerCAmelCase : List[Any] = x * (1 + scale) + shift return x
25
'''Configuration for the doc notebooks: install cell and doc-builder placeholders.'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
25
1
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=33 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : str = parent _lowerCAmelCase : List[str] = batch_size _lowerCAmelCase : Dict = seq_length _lowerCAmelCase : int = is_training _lowerCAmelCase : str = use_input_mask _lowerCAmelCase : Optional[int] = use_token_type_ids _lowerCAmelCase : Any = use_labels _lowerCAmelCase : List[str] = vocab_size _lowerCAmelCase : Union[str, Any] = hidden_size _lowerCAmelCase : Optional[int] = num_hidden_layers _lowerCAmelCase : Optional[int] = num_attention_heads _lowerCAmelCase : Dict = intermediate_size _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : List[str] = hidden_dropout_prob _lowerCAmelCase : int = attention_probs_dropout_prob _lowerCAmelCase : str = max_position_embeddings _lowerCAmelCase : List[Any] = type_vocab_size _lowerCAmelCase : Optional[Any] = type_sequence_label_size _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : str = num_labels _lowerCAmelCase : Optional[int] = num_choices _lowerCAmelCase : Dict = scope def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase : List[str] = None if self.use_input_mask: _lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : List[str] = None _lowerCAmelCase : List[str] = None if self.use_labels: _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a ( self ): '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , 
snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = EsmModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ ) _lowerCAmelCase : Any = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = EsmForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Dict = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = self.num_labels _lowerCAmelCase : Dict = EsmForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : List[str] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Dict = config_and_inputs _lowerCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = False __magic_name__ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) __magic_name__ = () __magic_name__ = ( { "feature-extraction": EsmModel, "fill-mask": EsmForMaskedLM, "text-classification": EsmForSequenceClassification, "token-classification": EsmForTokenClassification, "zero-shot": EsmForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = EsmModelTester(self ) _lowerCAmelCase : Tuple = ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase : int = type self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case__ ) @slow def a ( self ): '''simple 
docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Dict = EsmModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()[0] _lowerCAmelCase : Dict = EsmEmbeddings(config=snake_case__ ) _lowerCAmelCase : List[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) _lowerCAmelCase : List[str] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) _lowerCAmelCase : Optional[Any] = create_position_ids_from_input_ids(snake_case__ , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(snake_case__ , snake_case__ ) ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()[0] _lowerCAmelCase : Any = EsmEmbeddings(config=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.empty(2 , 4 , 30 ) _lowerCAmelCase : Dict = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] _lowerCAmelCase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) _lowerCAmelCase : Optional[int] = embeddings.create_position_ids_from_inputs_embeds(snake_case__ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(snake_case__ , snake_case__ ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def a ( self ): '''simple docstring''' pass @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @slow def a ( self ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase : Optional[int] = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _lowerCAmelCase : str = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _lowerCAmelCase : Tuple = model(snake_case__ )[0] _lowerCAmelCase : int = 33 _lowerCAmelCase : Dict = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , snake_case__ ) _lowerCAmelCase : Optional[int] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase : List[str] = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _lowerCAmelCase : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _lowerCAmelCase : Dict = model(snake_case__ )[0] # compare the actual values for a slice. _lowerCAmelCase : str = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
25
'''ResNet lazy import structure.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_resnet'] = [
        'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ResNetForImageClassification',
        'ResNetModel',
        'ResNetPreTrainedModel',
        'ResNetBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_resnet'] = [
        'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFResNetForImageClassification',
        'TFResNetModel',
        'TFResNetPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_resnet'] = [
        'FlaxResNetForImageClassification',
        'FlaxResNetModel',
        'FlaxResNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
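# Illustrative sketch (an addition, not part of the original file): the core idea
# behind `_LazyModule` is to defer submodule imports until first attribute access.
# A minimal stand-in using only the standard library, demonstrated on `json.decoder`:
if __name__ == '__main__':
    import importlib

    class LazyModule:
        def __init__(self, name, import_structure):
            self._name = name
            # Invert {submodule: [names]} into {name: submodule} for lookup.
            self._class_to_module = {
                cls: mod for mod, classes in import_structure.items() for cls in classes
            }

        def __getattr__(self, attr):
            # Import the owning submodule only now, then pull the attribute from it.
            module = importlib.import_module(f'{self._name}.{self._class_to_module[attr]}')
            return getattr(module, attr)

    lazy_json = LazyModule('json', {'decoder': ['JSONDecoder']})
    print(lazy_json.JSONDecoder)  # json.decoder is imported at this access, not before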
25
1
"""Bisection method for finding a root of f(x) = 10 - x^2. The original file mangled
both parameter names to `_A` (a SyntaxError) and lost the `equation`/`bisection`
names referenced in the __main__ block; both are restored here."""


def equation(x: float) -> float:
    """The function whose root is sought: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Locate a root of `equation` on [a, b] by repeated halving."""
    # Bisection requires a sign change between the two endpoints.
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
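A quick numerical sanity check for the repaired function above (an addition, not part of the original file): the loop stops once the bracket is narrower than 0.01, so the returned midpoint sits within 0.01 of the true root.

root = bisection(0, 6)               # f changes sign on [0, 6]
assert abs(root - 10 ** 0.5) < 0.01  # true root is sqrt(10) ~ 3.1623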
25
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "nat" __magic_name__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Tuple = embed_dim _lowerCAmelCase : Any = depths _lowerCAmelCase : Dict = len(snake_case__ ) _lowerCAmelCase : str = num_heads _lowerCAmelCase : Dict = kernel_size _lowerCAmelCase : Union[str, Any] = mlp_ratio _lowerCAmelCase : int = qkv_bias _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = drop_path_rate _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Tuple = layer_norm_eps _lowerCAmelCase : Dict = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) _lowerCAmelCase : Any = layer_scale_init_value _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
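The `hidden_size` line in the NAT config above doubles the embedding dimension once per stage after the first. A worked check using the signature's default values (added for illustration):

embed_dim = 64
depths = [3, 4, 6, 5]                                  # four stages
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # 64 * 2**3
assert hidden_size == 512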
25
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=0.9 , snake_case__=None , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = size if size is not None else {'shortest_edge': 30} _lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else {'height': 30, 'width': 30} _lowerCAmelCase : Dict = parent _lowerCAmelCase : str = batch_size _lowerCAmelCase : Tuple = num_channels _lowerCAmelCase : Dict = min_resolution _lowerCAmelCase : Dict = max_resolution _lowerCAmelCase : str = do_resize_and_center_crop _lowerCAmelCase : Dict = size _lowerCAmelCase : Optional[Any] = crop_pct _lowerCAmelCase : Optional[int] = crop_size _lowerCAmelCase : Optional[int] = do_normalize _lowerCAmelCase : str = image_mean _lowerCAmelCase : int = image_std def a ( self ): '''simple docstring''' return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = PoolFormerImageProcessor if is_vision_available() else None def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = PoolFormerImageProcessingTester(self ) @property def a ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(snake_case__ , 'size' ) ) self.assertTrue(hasattr(snake_case__ , 'crop_pct' ) ) self.assertTrue(hasattr(snake_case__ , 'do_normalize' ) ) self.assertTrue(hasattr(snake_case__ , 'image_mean' ) ) self.assertTrue(hasattr(snake_case__ , 'image_std' ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not 
batched input _lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : Optional[Any] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) # Test not batched input _lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : List[Any] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input _lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : str = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
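For reference, the two methods at the end of the tokenizer above produce RoBERTa's sequence layout with a doubled separator between pairs. The ids below are hypothetical and assume roberta-base's conventional special tokens (bos_token_id=0, eos_token_id=2); this snippet is illustrative only.

token_ids_a = [31414, 232]  # hypothetical ids for a short sentence
token_ids_b = [12345]       # hypothetical ids for a second sentence

single = [0] + token_ids_a + [2]         # <s> A </s>
pair = single + [2] + token_ids_b + [2]  # <s> A </s></s> B </s>
# create_token_type_ids_from_sequences returns all zeros in both cases,
# since RoBERTa does not use token type ids.
assert len(pair) == len(token_ids_a) + len(token_ids_b) + 4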
25
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Any = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "luke" def __init__( self , snake_case__=5_0267 , snake_case__=50_0000 , snake_case__=768 , snake_case__=256 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=True , snake_case__=None , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) _lowerCAmelCase : Union[str, Any] = vocab_size _lowerCAmelCase : Any = entity_vocab_size _lowerCAmelCase : Optional[Any] = hidden_size _lowerCAmelCase : Union[str, Any] = entity_emb_size _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : Union[str, Any] = num_attention_heads _lowerCAmelCase : str = hidden_act _lowerCAmelCase : Optional[int] = intermediate_size _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : str = attention_probs_dropout_prob _lowerCAmelCase : Tuple = max_position_embeddings _lowerCAmelCase : int = type_vocab_size _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : Union[str, Any] = layer_norm_eps _lowerCAmelCase : Any = use_entity_aware_attention _lowerCAmelCase : Union[str, Any] = classifier_dropout
25
"""Dutch national flag sort: one-pass, in-place partition of a sequence containing
only the values 0, 1 and 2. The original file mangled the module constants to a
single reused name and lost the `sequence` parameter; names are restored here."""

red = 0    # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2   # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
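A minimal usage check for the repaired sorter (an addition for illustration):

assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []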
25
1
'''simple docstring''' from __future__ import annotations class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(snake_case__ ) != 0: _lowerCAmelCase : int = len(rows[0] ) if cols == 0: raise error for row in rows: if len(snake_case__ ) != cols: raise error for value in row: if not isinstance(snake_case__ , (int, float) ): raise error _lowerCAmelCase : Tuple = rows else: _lowerCAmelCase : Dict = [] def a ( self ): '''simple docstring''' return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def a ( self ): '''simple docstring''' return len(self.rows ) @property def a ( self ): '''simple docstring''' return len(self.rows[0] ) @property def a ( self ): '''simple docstring''' return (self.num_rows, self.num_columns) @property def a ( self ): '''simple docstring''' return self.order[0] == self.order[1] def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(snake_case__ ) def a ( self ): '''simple docstring''' if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def a ( self ): '''simple docstring''' return bool(self.determinant() ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(snake_case__ ).determinant() def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if (row + column) % 2 == 0: return self.get_minor(snake_case__ , snake_case__ ) return -1 * self.get_minor(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' return Matrix( [ [self.get_minor(snake_case__ , snake_case__ ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def a ( self ): '''simple docstring''' return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self ): '''simple docstring''' return str(self.rows ) def __str__( self ): '''simple docstring''' if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(snake_case__ ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(snake_case__ , snake_case__ ): raise type_error for value in row: if not isinstance(snake_case__ , (int, float) ): raise type_error if len(snake_case__ ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(snake_case__ ) else: _lowerCAmelCase : Optional[Any] = self.rows[0:position] + [row] + self.rows[position:] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(snake_case__ , snake_case__ ): raise type_error for value in column: if not isinstance(snake_case__ , (int, float) ): raise type_error if len(snake_case__ ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: _lowerCAmelCase : List[Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: _lowerCAmelCase : Union[str, Any] = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): return NotImplemented return self.rows == other.rows def __ne__( self , snake_case__ ): '''simple docstring''' return not self == other def __neg__( self ): '''simple docstring''' return self * -1 def __add__( self , snake_case__ ): '''simple docstring''' if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , snake_case__ ): '''simple docstring''' if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , snake_case__ ): '''simple docstring''' if isinstance(snake_case__ , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(snake_case__ , snake_case__ ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(snake_case__ , snake_case__ ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) _lowerCAmelCase : List[Any] = self for _ in range(other - 1 ): result *= self return result @classmethod def a ( cls , snake_case__ , snake_case__ ): '''simple docstring''' return sum(row[i] * column[i] for i in 
range(len(snake_case__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
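The file above mangles the class name, but its methods still construct `Matrix(...)` and reference `self.rows`, `self.order`, `self.is_square`, `self.determinant()` and `self.identity()` internally, so those names are recoverable. Assuming the restored `Matrix` name, a short usage sketch (an addition):

m = Matrix([[1, 2], [3, 4]])
assert m.order == (2, 2) and m.is_square
assert m.determinant() == -2              # (1 * 4) - (2 * 3)
assert m.identity().rows == [[1, 0], [0, 1]]
assert (m + m).rows == [[2, 4], [6, 8]]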
25
"""Count the Sundays that fell on the first of the month during the twentieth
century (1 Jan 1901 to 31 Dec 2000). The original file lost the `solution`,
`days_per_month`, `day`, `month`, `year` and `sundays` names; restored here."""


def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
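The hand-rolled calendar above can be cross-checked against the standard library; this verification is an addition and assumes the restored `solution` name.

from datetime import date

sundays = sum(
    date(year, month, 1).weekday() == 6  # Monday == 0, so Sunday == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
assert sundays == solution() == 171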
25
1
"""Package init for the download utilities. The mangled assignment of the export
list is restored to `__all__`, which is what a list of public names bound at the
top of an __init__ module stands for."""

__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
25
"""Sum Euler's totient phi(n) for 2 <= n <= limit. The original file lost the
`solution`, `primes`, `phi` and `limit` names and reused `_A` for two different
arguments of `range`; restored here."""


def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # phi(n) = n * product(1 - 1/p) over the distinct primes p dividing n.
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
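For a small limit the sieve can be verified against a direct gcd count; this slow cross-check is an addition, not part of the original file.

from math import gcd

def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

assert solution(10) == sum(phi_naive(n) for n in range(2, 11))  # both give 31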
25
1
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
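The grouping rule in `sort_objects` above (constants first, then classes, then functions, each group sorted case-insensitively with underscores ignored) can be checked on a toy list. The names below are made up for illustration; the snippet inlines the same logic rather than importing the script.

names = ["load_model", "MODEL_LIST", "ModelConfig", "_helper"]

key = lambda n: n.lower().replace("_", "")  # the "ignore underscore" tiebreak
constants = sorted([n for n in names if n.isupper()], key=key)
classes = sorted([n for n in names if n[0].isupper() and not n.isupper()], key=key)
functions = sorted([n for n in names if not n[0].isupper()], key=key)

assert constants + classes + functions == ["MODEL_LIST", "ModelConfig", "_helper", "load_model"]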
25
1
"""Benchmark a brute-force vs. a two-pointer triplet-sum search. The original
file mangled both parameters of each function to `_A` (a SyntaxError); the
names below are confirmed by the timeit setup string embedded in the file."""

from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: try every ordered triple of distinct elements.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Two-pointer scan over the sorted array.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
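A deterministic check for the two implementations above (the module itself only benchmarks them on random data); added for illustration:

assert triplet_sum1([1, 2, 3, 4], 9) == (2, 3, 4)
assert triplet_sum2([1, 2, 3, 4], 9) == (2, 3, 4)
assert triplet_sum2([1, 2, 3], 100) == (0, 0, 0)  # no such triple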
25
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
"""Subset-sum via dynamic programming. The original file mangled both parameters
to `_A` (a SyntaxError) and dropped the table assignments' targets; the function
name `is_sum_subset` is restored by analogy with the reference implementation."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    # subset[i][j] == True iff some subset of arr[:i] sums to j.
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
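Example usage of the DP above (an addition for illustration):

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)       # 4 + 5 == 9
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)  # no subset reaches 30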
25
"""Evaluate a postfix (reverse Polish) expression with + - * /. The original file
lost the `postfix_notation` parameter and `token` names; the function name
`evaluate_postfix` is restored by analogy with the reference implementation."""

from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncating toward zero (Python's // floors).
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
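Example usage (added; assumes the restored `evaluate_postfix` name):

assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9   # (2 + 1) * 3
assert evaluate_postfix(["4", "13", "5", "/", "+"]) == 6  # 4 + 13/5, truncated toward zero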
25
1
"""Configuration for BertGeneration. The original file mangled every constructor
parameter to `snake_case__` (a SyntaxError); the parameter names are recovered
from the surviving right-hand sides of the attribute assignments."""

from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
25
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mobilenet_v2" def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Union[str, Any] = image_size _lowerCAmelCase : List[Any] = depth_multiplier _lowerCAmelCase : List[Any] = depth_divisible_by _lowerCAmelCase : Optional[Any] = min_depth _lowerCAmelCase : str = expand_ratio _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = first_layer_is_expansion _lowerCAmelCase : int = finegrained_output _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = tf_padding _lowerCAmelCase : Optional[int] = classifier_dropout_prob _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : str = semantic_loss_ignore_index class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = version.parse("1.11" ) @property def a ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' return 1E-4
25
1
'''simple docstring''' import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case__ , 'tf_padding' ) ) self.parent.assertTrue(hasattr(snake_case__ , 'depth_multiplier' ) ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=3 , snake_case__=32 , snake_case__=0.25 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=1280 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : str = parent _lowerCAmelCase : Tuple = batch_size _lowerCAmelCase : Dict = num_channels _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : int = depth_multiplier _lowerCAmelCase : int = depth_divisible_by _lowerCAmelCase : str = min_depth _lowerCAmelCase : Any = expand_ratio _lowerCAmelCase : int = tf_padding _lowerCAmelCase : Dict = output_stride _lowerCAmelCase : List[str] = first_layer_is_expansion _lowerCAmelCase : Tuple = finegrained_output _lowerCAmelCase : int = hidden_act _lowerCAmelCase : Tuple = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) _lowerCAmelCase : Any = classifier_dropout_prob _lowerCAmelCase : int = use_labels _lowerCAmelCase : List[Any] = is_training _lowerCAmelCase : List[str] = num_labels _lowerCAmelCase : Dict = initializer_range _lowerCAmelCase : Union[str, Any] = scope def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Union[str, Any] = None _lowerCAmelCase : Union[str, Any] = None if self.use_labels: _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def a ( self ): '''simple docstring''' return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , 
initializer_range=self.initializer_range , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MobileNetVaModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : str = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : List[str] = MobileNetVaForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : List[str] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : List[str] = MobileNetVaForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Any = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = config_and_inputs _lowerCAmelCase : List[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification, "image-segmentation": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = MobileNetVaModelTester(self ) _lowerCAmelCase : Dict = MobileNetVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileNetV2 does not output attentions' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Dict = model_class(snake_case__ ) _lowerCAmelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so 
arg_names order is deterministic _lowerCAmelCase : Any = [*signature.parameters.keys()] _lowerCAmelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : List[str] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : Optional[int] = outputs.hidden_states _lowerCAmelCase : Dict = 16 self.assertEqual(len(snake_case__ ) , snake_case__ ) _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : str = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : str = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : str = MobileNetVaModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(snake_case__ ) _lowerCAmelCase : Any = self.default_image_processor _lowerCAmelCase : Tuple = prepare_img() _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : Union[str, Any] = model(**snake_case__ ) # verify the logits _lowerCAmelCase : int = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) _lowerCAmelCase : Union[str, Any] = model.to(snake_case__ ) _lowerCAmelCase : str = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) _lowerCAmelCase : int = prepare_img() _lowerCAmelCase : Tuple = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ 
) # forward pass with torch.no_grad(): _lowerCAmelCase : str = model(**snake_case__ ) _lowerCAmelCase : int = outputs.logits # verify the logits _lowerCAmelCase : int = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , snake_case__ ) _lowerCAmelCase : List[str] = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )
25
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
'''simple docstring'''
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def lowercase(_A):
    """simple docstring"""
    # no-op when accelerate is missing or too old to provide forward hooks
    if not is_accelerate_available():
        return _A
    _lowerCAmelCase : Tuple = version.parse(accelerate.__version__).base_version
    if version.parse(_lowerCAmelCase) < version.parse('0.17.0'):
        return _A

    def wrapper(self, *args, **kwargs):
        # run the accelerate pre-forward hook, if one is attached, before the method
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return _A(self, *args, **kwargs)

    return wrapper
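A minimal usage sketch for the version-gated hook decorator above, assuming an accelerate-style `_hf_hook` object with a `pre_forward` method may be attached to the instance (the module class below is hypothetical):

class TinyModule:
    @lowercase  # wrap forward so any attached accelerate hook fires first
    def forward(self, x):
        return x * 2

m = TinyModule()
assert m.forward(3) == 6  # no _hf_hook attached, so the call passes straight through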
25
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
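A hedged usage sketch for the fast NLLB tokenizer class defined above (here called UpperCamelCase__); the checkpoint name is taken from the URL map earlier in the file and the language codes come from FAIRSEQ_LANGUAGE_CODES:

tok = UpperCamelCase__.from_pretrained('facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='fra_Latn')
inputs = tok('Hello world', return_tensors='pt')
tok.src_lang = 'fra_Latn'  # the @src_lang.setter re-applies the source-language special tokens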
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Any = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, 
TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
'''simple docstring'''
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    """kernels/rwkv/wkv_cuda.cu""",
    """kernels/rwkv/wkv_op.cpp""",
    """kernels/deformable_detr/ms_deform_attn.h""",
    """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
    """models/graphormer/algos_graphormer.pyx""",
]


def lowercase(_A):
    """simple docstring"""
    # every custom extension file must exist under the given package root
    for file in FILES_TO_FIND:
        if not (_A / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("""transformers""")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / """build/lib/transformers"""

    if not lowercase(transformers_path):
        raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
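Assuming the script above is saved as check_build.py (the filename is an assumption), the two supported invocations are:

python check_build.py              # checks build/lib/transformers under the current directory
python check_build.py --check_lib  # checks the installed transformers package instead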
25
1
'''simple docstring'''
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def lowercase(_A):
    """simple docstring"""
    if "model" in orig_key:
        _lowerCAmelCase : Union[str, Any] = orig_key.replace('model.', '')
    if "norm1" in orig_key:
        _lowerCAmelCase : int = orig_key.replace('norm1', 'attention.output.LayerNorm')
    if "norm2" in orig_key:
        _lowerCAmelCase : Tuple = orig_key.replace('norm2', 'output.LayerNorm')
    if "norm" in orig_key:
        _lowerCAmelCase : int = orig_key.replace('norm', 'LayerNorm')
    if "transformer" in orig_key:
        _lowerCAmelCase : Optional[int] = orig_key.split('.')[0].split('_')[-1]
        _lowerCAmelCase : Union[str, Any] = orig_key.replace(f'transformer_{layer_num}', f'encoder.layer.{layer_num}')
    if "mha.attn" in orig_key:
        _lowerCAmelCase : str = orig_key.replace('mha.attn', 'attention.self')
    if "mha" in orig_key:
        _lowerCAmelCase : List[Any] = orig_key.replace('mha', 'attention')
    if "W_q" in orig_key:
        _lowerCAmelCase : Optional[int] = orig_key.replace('W_q', 'self.query')
    if "W_k" in orig_key:
        _lowerCAmelCase : Dict = orig_key.replace('W_k', 'self.key')
    if "W_v" in orig_key:
        _lowerCAmelCase : Optional[Any] = orig_key.replace('W_v', 'self.value')
    if "ff1" in orig_key:
        _lowerCAmelCase : Dict = orig_key.replace('ff1', 'intermediate.dense')
    if "ff2" in orig_key:
        _lowerCAmelCase : List[str] = orig_key.replace('ff2', 'output.dense')
    if "ff" in orig_key:
        _lowerCAmelCase : str = orig_key.replace('ff', 'output.dense')
    if "mlm_class" in orig_key:
        _lowerCAmelCase : int = orig_key.replace('mlm.mlm_class', 'cls.predictions.decoder')
    if "mlm" in orig_key:
        _lowerCAmelCase : Any = orig_key.replace('mlm', 'cls.predictions.transform')
    if "cls" not in orig_key:
        _lowerCAmelCase : Any = 'yoso.' + orig_key
    return orig_key


def lowercase(_A, _A):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        _lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_A)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            _lowerCAmelCase : Optional[int] = val
    _lowerCAmelCase : Union[str, Any] = orig_state_dict['cls.predictions.decoder.bias']
    _lowerCAmelCase : List[Any] = torch.arange(_A).expand((1, -1)) + 2
    return orig_state_dict


def lowercase(_A, _A, _A):
    """simple docstring"""
    _lowerCAmelCase : Any = torch.load(_A, map_location='cpu')['model_state_dict']
    _lowerCAmelCase : Union[str, Any] = YosoConfig.from_json_file(_A)
    _lowerCAmelCase : int = YosoForMaskedLM(_A)
    _lowerCAmelCase : List[str] = convert_checkpoint_helper(config.max_position_embeddings, _A)
    print(model.load_state_dict(_A))
    model.eval()
    model.save_pretrained(_A)
    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')


if __name__ == "__main__":
    lowerCAmelCase : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    lowerCAmelCase : int = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
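A hedged invocation sketch for the conversion script above; the script filename and all paths below are placeholders:

python convert_yoso_checkpoint.py \
    --pytorch_model_path ./yoso_checkpoint.bin \
    --config_file ./yoso_config.json \
    --pytorch_dump_path ./yoso-converted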
25
'''simple docstring'''


def lowercase(_A):
    """simple docstring"""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''

    # append each character + "|" in new_string for range(0, length-1)
    for i in _A[: len(_A) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += _A[-1]

    # we will store the starting and ending of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this one
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
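Two quick checks for the longest-palindromic-substring routine above (the expected outputs follow from Manacher's algorithm):

print(lowercase('abbbaba'))  # -> 'abbba'
print(lowercase('ababa'))    # -> 'ababa' (the whole string is a palindrome)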
25
1
'''simple docstring''' import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) lowerCAmelCase : List[str] = """bert-base-cased""" lowerCAmelCase : Dict = """fp16""" lowerCAmelCase : Any = """bf16""" lowerCAmelCase : Any = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : List[str] = dict( ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(snake_case__ ): _lowerCAmelCase : List[str] = self.dist_env.copy() _lowerCAmelCase : Optional[Any] = F'{i + 1}' _lowerCAmelCase : Optional[Any] = strategy with mockenv_context(**snake_case__ ): _lowerCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(snake_case__ ): _lowerCAmelCase : str = self.dist_env.copy() _lowerCAmelCase : str = prefetch_policy with mockenv_context(**snake_case__ ): _lowerCAmelCase : List[Any] = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(snake_case__ ): _lowerCAmelCase : List[str] = self.dist_env.copy() _lowerCAmelCase : str = state_dict_type with mockenv_context(**snake_case__ ): _lowerCAmelCase : str = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = AutoModel.from_pretrained(snake_case__ ) for policy in FSDP_AUTO_WRAP_POLICY: _lowerCAmelCase : List[str] = self.dist_env.copy() _lowerCAmelCase : int = policy if policy == "TRANSFORMER_BASED_WRAP": _lowerCAmelCase : str = 'BertLayer' elif policy == "SIZE_BASED_WRAP": _lowerCAmelCase : List[Any] = '2000' with mockenv_context(**snake_case__ ): _lowerCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case__ ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) _lowerCAmelCase : Any = self.dist_env.copy() _lowerCAmelCase : str 
= 'TRANSFORMER_BASED_WRAP' _lowerCAmelCase : Optional[Any] = 'T5Layer' with mockenv_context(**snake_case__ ): _lowerCAmelCase : int = FullyShardedDataParallelPlugin() with self.assertRaises(snake_case__ ) as cm: fsdp_plugin.set_auto_wrap_policy(snake_case__ ) self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) ) _lowerCAmelCase : List[Any] = self.dist_env.copy() _lowerCAmelCase : Optional[int] = 'SIZE_BASED_WRAP' _lowerCAmelCase : List[str] = '0' with mockenv_context(**snake_case__ ): _lowerCAmelCase : Tuple = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case__ ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: _lowerCAmelCase : Dict = self.dist_env.copy() _lowerCAmelCase : int = mp_dtype with mockenv_context(**snake_case__ ): _lowerCAmelCase : List[str] = Accelerator() if mp_dtype == "fp16": _lowerCAmelCase : Any = torch.floataa elif mp_dtype == "bf16": _lowerCAmelCase : List[Any] = torch.bfloataa _lowerCAmelCase : Tuple = MixedPrecision(param_dtype=snake_case__ , reduce_dtype=snake_case__ , buffer_dtype=snake_case__ ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , snake_case__ ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , snake_case__ ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(snake_case__ ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: _lowerCAmelCase : List[Any] = self.dist_env.copy() _lowerCAmelCase : Dict = str(snake_case__ ).lower() with mockenv_context(**snake_case__ ): _lowerCAmelCase : List[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=snake_case__ ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : List[str] = 0.82 _lowerCAmelCase : int = [ 'fsdp_shard_grad_op_transformer_based_wrap', 'fsdp_full_shard_transformer_based_wrap', ] _lowerCAmelCase : Any = { 'multi_gpu_fp16': 3200, 'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000, 'fsdp_full_shard_transformer_based_wrap_fp16': 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } _lowerCAmelCase : str = 160 _lowerCAmelCase : Any = 160 _lowerCAmelCase : Optional[Any] = inspect.getfile(accelerate.test_utils ) _lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = os.path.join(self.test_scripts_folder , 'test_performance.py' ) _lowerCAmelCase : Dict = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp'] for config in self.performance_configs: _lowerCAmelCase : List[Any] = cmd.copy() for i, strategy in enumerate(snake_case__ ): if strategy.lower() in config: cmd_config.append(F'--fsdp_sharding_strategy={i+1}' ) break if "fp32" in config: cmd_config.append('--mixed_precision=no' ) else: cmd_config.append('--mixed_precision=fp16' ) if "cpu_offload" in config: cmd_config.append('--fsdp_offload_params=True' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('--fsdp_min_num_params=2000' ) cmd_config.extend( [ self.test_file_path, F'--output_dir={self.tmpdir}', F'--performance_lower_bound={self.performance_lower_bound}', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' ) _lowerCAmelCase : Any = [ 'accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp', '--mixed_precision=fp16', '--fsdp_transformer_layer_cls_to_wrap=BertLayer', ] for i, strategy in enumerate(snake_case__ ): _lowerCAmelCase : List[str] = cmd.copy() cmd_config.append(F'--fsdp_sharding_strategy={i+1}' ) if strategy != "FULL_SHARD": continue _lowerCAmelCase : Any = len(snake_case__ ) for state_dict_type in FSDP_STATE_DICT_TYPE: _lowerCAmelCase : List[Any] = cmd_config[:state_dict_config_index] cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}' ) cmd_config.extend( [ self.test_file_path, F'--output_dir={self.tmpdir}', '--partial_train_epoch=1', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() ) _lowerCAmelCase : Any = cmd_config[:-1] _lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdir , 'epoch_0' ) cmd_config.extend( [ F'--resume_from_checkpoint={resume_from_checkpoint}', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' ) _lowerCAmelCase : Dict = [ 'accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): _lowerCAmelCase : str = cmd.copy() if "fp16" in spec: cmd_config.extend(['--mixed_precision=fp16'] ) else: cmd_config.extend(['--mixed_precision=no'] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['--use_fsdp'] ) for i, strategy in enumerate(snake_case__ ): if strategy.lower() in spec: cmd_config.append(F'--fsdp_sharding_strategy={i+1}' ) break if "cpu_offload" in spec: 
cmd_config.append('--fsdp_offload_params=True' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('--fsdp_min_num_params=2000' ) cmd_config.extend( [ self.test_file_path, F'--output_dir={self.tmpdir}', F'--peak_memory_upper_bound={peak_mem_upper_bound}', F'--n_train={self.n_train}', F'--n_val={self.n_val}', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() )
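For reference, the `accelerate launch` command these tests assemble reduces to roughly this shape; the script name and flag values below are illustrative, taken from the branches above:

accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
    --use_fsdp --mixed_precision=fp16 \
    --fsdp_sharding_strategy=1 --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
    --fsdp_transformer_layer_cls_to_wrap=BertLayer \
    test_performance.py --output_dir=/tmp/out --performance_lower_bound=0.82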
25
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
25
1
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)


def lowercase(_A):
    """simple docstring"""
    _A = re.sub('<n>', '', _A)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_A))
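A small usage sketch for the sentence splitter above; it needs nltk with the punkt data, which the module downloads at import time:

print(lowercase('Hello world. How are you?'))
# Hello world.
# How are you?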
25
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)

lowerCAmelCase : Optional[Any] = {
    """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
        """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """simple docstring"""

    __magic_name__ = "trajectory_transformer"
    __magic_name__ = ["past_key_values"]
    __magic_name__ = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        snake_case__=100,
        snake_case__=5,
        snake_case__=1,
        snake_case__=1,
        snake_case__=249,
        snake_case__=6,
        snake_case__=17,
        snake_case__=25,
        snake_case__=4,
        snake_case__=4,
        snake_case__=128,
        snake_case__=0.1,
        snake_case__=0.1,
        snake_case__=0.1,
        snake_case__=0.0006,
        snake_case__=512,
        snake_case__=0.02,
        snake_case__=1E-12,
        snake_case__=1,
        snake_case__=True,
        snake_case__=1,
        snake_case__=5_0256,
        snake_case__=5_0256,
        **snake_case__,
    ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = vocab_size
        _lowerCAmelCase : Any = action_weight
        _lowerCAmelCase : Optional[int] = reward_weight
        _lowerCAmelCase : Union[str, Any] = value_weight
        _lowerCAmelCase : List[str] = max_position_embeddings
        _lowerCAmelCase : Tuple = block_size
        _lowerCAmelCase : List[Any] = action_dim
        _lowerCAmelCase : List[Any] = observation_dim
        _lowerCAmelCase : Union[str, Any] = transition_dim
        _lowerCAmelCase : Tuple = learning_rate
        _lowerCAmelCase : int = n_layer
        _lowerCAmelCase : Any = n_head
        _lowerCAmelCase : Tuple = n_embd
        _lowerCAmelCase : Optional[Any] = embd_pdrop
        _lowerCAmelCase : Union[str, Any] = attn_pdrop
        _lowerCAmelCase : Any = resid_pdrop
        _lowerCAmelCase : Optional[Any] = initializer_range
        _lowerCAmelCase : List[Any] = layer_norm_eps
        _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
        _lowerCAmelCase : List[Any] = use_cache
        super().__init__(pad_token_id=snake_case__, bos_token_id=snake_case__, eos_token_id=snake_case__, **snake_case__)
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Tuple = { """configuration_xlm_roberta""": [ """XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaConfig""", """XLMRobertaOnnxConfig""", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = ["""XLMRobertaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = ["""XLMRobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = [ """XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaForCausalLM""", """XLMRobertaForMaskedLM""", """XLMRobertaForMultipleChoice""", """XLMRobertaForQuestionAnswering""", """XLMRobertaForSequenceClassification""", """XLMRobertaForTokenClassification""", """XLMRobertaModel""", """XLMRobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMRobertaForCausalLM""", """TFXLMRobertaForMaskedLM""", """TFXLMRobertaForMultipleChoice""", """TFXLMRobertaForQuestionAnswering""", """TFXLMRobertaForSequenceClassification""", """TFXLMRobertaForTokenClassification""", """TFXLMRobertaModel""", """TFXLMRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Any = [ """FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxXLMRobertaForMaskedLM""", """FlaxXLMRobertaForCausalLM""", """FlaxXLMRobertaForMultipleChoice""", """FlaxXLMRobertaForQuestionAnswering""", """FlaxXLMRobertaForSequenceClassification""", """FlaxXLMRobertaForTokenClassification""", """FlaxXLMRobertaModel""", """FlaxXLMRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, 
TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Dict = { """speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mctct" def __init__( self , snake_case__=8065 , snake_case__=1536 , snake_case__=36 , snake_case__=6144 , snake_case__=4 , snake_case__=384 , snake_case__=920 , snake_case__=1E-5 , snake_case__=0.3 , snake_case__="relu" , snake_case__=0.02 , snake_case__=0.3 , snake_case__=0.3 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=1 , snake_case__=0.3 , snake_case__=1 , snake_case__=(7,) , snake_case__=(3,) , snake_case__=80 , snake_case__=1 , snake_case__=None , snake_case__="sum" , snake_case__=False , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) _lowerCAmelCase : Dict = vocab_size _lowerCAmelCase : Any = hidden_size _lowerCAmelCase : List[Any] = num_hidden_layers _lowerCAmelCase : List[str] = intermediate_size _lowerCAmelCase : Union[str, Any] = num_attention_heads _lowerCAmelCase : Dict = attention_head_dim _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Any = layer_norm_eps _lowerCAmelCase : Optional[Any] = layerdrop _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : Optional[int] = pad_token_id _lowerCAmelCase : Optional[Any] = bos_token_id _lowerCAmelCase : Dict = eos_token_id _lowerCAmelCase : Optional[int] = conv_glu_dim _lowerCAmelCase : List[str] = conv_dropout _lowerCAmelCase : Tuple = num_conv_layers _lowerCAmelCase : Optional[Any] = input_feat_per_channel _lowerCAmelCase : Union[str, Any] = input_channels _lowerCAmelCase : Optional[int] = conv_channels _lowerCAmelCase : Optional[int] = ctc_loss_reduction _lowerCAmelCase : Any = ctc_zero_infinity # prevents config testing fail with exporting to json _lowerCAmelCase : Union[str, Any] = list(snake_case__ ) _lowerCAmelCase : Dict = list(snake_case__ ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ' F'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, ' F'`config.num_conv_layers = {self.num_conv_layers}`.' )
'''simple docstring'''
from math import isqrt


def is_prime(number):
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime=10**6):
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
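# Illustration (an added self-contained sketch, not part of the solution above):
# the candidates 7, 19, 37, 61, ... generated by the loop are the differences of
# consecutive cubes, since (k + 1)**3 - k**3 == 3*k*k + 3*k + 1, and stepping by
# 6 * cube_index reproduces exactly that sequence incrementally.
for _k in range(1, 6):
    assert (_k + 1) ** 3 - _k**3 == 3 * _k * _k + 3 * _k + 1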
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self ): '''simple docstring''' _lowerCAmelCase : Dict = {} def a ( self , snake_case__ , snake_case__ , snake_case__=1 ): '''simple docstring''' if self.graph.get(snake_case__ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _lowerCAmelCase : List[str] = [[w, v]] if not self.graph.get(snake_case__ ): _lowerCAmelCase : Any = [] def a ( self ): '''simple docstring''' return list(self.graph ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if self.graph.get(snake_case__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(snake_case__ ) def a ( self , snake_case__=-2 , snake_case__=-1 ): '''simple docstring''' if s == d: return [] _lowerCAmelCase : Any = [] _lowerCAmelCase : str = [] if s == -2: _lowerCAmelCase : Union[str, Any] = list(self.graph )[0] stack.append(snake_case__ ) visited.append(snake_case__ ) _lowerCAmelCase : List[Any] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase : List[str] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(snake_case__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase : List[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(snake_case__ ) != 0: _lowerCAmelCase : Dict = stack[len(snake_case__ ) - 1] else: _lowerCAmelCase : int = ss # check if se have reached the starting point if len(snake_case__ ) == 0: return visited def a ( self , snake_case__=-1 ): '''simple docstring''' if c == -1: _lowerCAmelCase : str = floor(random() * 1_0000 ) + 10 for i in range(snake_case__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _lowerCAmelCase : List[Any] = floor(random() * c ) + 1 if n != i: self.add_pair(snake_case__ , snake_case__ , 1 ) def a ( self , snake_case__=-2 ): '''simple docstring''' _lowerCAmelCase : Tuple = deque() _lowerCAmelCase : Any = [] if s == -2: _lowerCAmelCase : Dict = list(self.graph )[0] d.append(snake_case__ ) visited.append(snake_case__ ) while d: _lowerCAmelCase : Optional[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def a ( self , snake_case__ ): '''simple docstring''' return len(self.graph[u] ) def a ( self , snake_case__=-2 ): '''simple docstring''' _lowerCAmelCase : Any = [] _lowerCAmelCase : Optional[int] = [] if s == -2: _lowerCAmelCase : Tuple = list(self.graph )[0] stack.append(snake_case__ ) visited.append(snake_case__ ) _lowerCAmelCase : Any = s _lowerCAmelCase : Dict = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase : Tuple = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase : str = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(snake_case__ ) != 0: _lowerCAmelCase : List[str] = stack[len(snake_case__ ) - 1] else: _lowerCAmelCase : Optional[Any] = ss # check if se have reached the starting point if 
len(snake_case__ ) == 0: return sorted_nodes def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : str = [] _lowerCAmelCase : Tuple = list(self.graph )[0] stack.append(snake_case__ ) visited.append(snake_case__ ) _lowerCAmelCase : Any = -2 _lowerCAmelCase : str = [] _lowerCAmelCase : Optional[int] = s _lowerCAmelCase : List[str] = False _lowerCAmelCase : Tuple = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase : Dict = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase : int = len(snake_case__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase : Tuple = True if len(snake_case__ ) != 0: _lowerCAmelCase : Dict = stack[len(snake_case__ ) - 1] else: _lowerCAmelCase : Any = False indirect_parents.append(snake_case__ ) _lowerCAmelCase : str = s _lowerCAmelCase : Optional[int] = ss # check if se have reached the starting point if len(snake_case__ ) == 0: return list(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = [] _lowerCAmelCase : str = [] _lowerCAmelCase : List[Any] = list(self.graph )[0] stack.append(snake_case__ ) visited.append(snake_case__ ) _lowerCAmelCase : Dict = -2 _lowerCAmelCase : Any = [] _lowerCAmelCase : List[str] = s _lowerCAmelCase : str = False _lowerCAmelCase : int = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase : Tuple = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase : Optional[Any] = len(snake_case__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase : List[str] = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase : int = True if len(snake_case__ ) != 0: _lowerCAmelCase : Dict = stack[len(snake_case__ ) - 1] else: _lowerCAmelCase : List[Any] = False indirect_parents.append(snake_case__ ) _lowerCAmelCase : Any = s _lowerCAmelCase : str = ss # check if se have reached the starting point if len(snake_case__ ) == 0: return False def a ( self , snake_case__=-2 , snake_case__=-1 ): '''simple docstring''' _lowerCAmelCase : Tuple = time() self.dfs(snake_case__ , snake_case__ ) _lowerCAmelCase : Any = time() return end - begin def a ( self , snake_case__=-2 ): '''simple docstring''' _lowerCAmelCase : Dict = time() self.bfs(snake_case__ ) _lowerCAmelCase : List[str] = time() return end - begin class UpperCamelCase__ : """simple docstring""" def __init__( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = {} def a ( self , snake_case__ , snake_case__ , snake_case__=1 ): '''simple docstring''' if self.graph.get(snake_case__ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _lowerCAmelCase : Any = [[w, v]] # add the other way if 
self.graph.get(snake_case__ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _lowerCAmelCase : str = [[w, u]] def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if self.graph.get(snake_case__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(snake_case__ ) # the other way round if self.graph.get(snake_case__ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(snake_case__ ) def a ( self , snake_case__=-2 , snake_case__=-1 ): '''simple docstring''' if s == d: return [] _lowerCAmelCase : Dict = [] _lowerCAmelCase : int = [] if s == -2: _lowerCAmelCase : int = list(self.graph )[0] stack.append(snake_case__ ) visited.append(snake_case__ ) _lowerCAmelCase : int = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(snake_case__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(snake_case__ ) != 0: _lowerCAmelCase : List[str] = stack[len(snake_case__ ) - 1] else: _lowerCAmelCase : int = ss # check if se have reached the starting point if len(snake_case__ ) == 0: return visited def a ( self , snake_case__=-1 ): '''simple docstring''' if c == -1: _lowerCAmelCase : Tuple = floor(random() * 1_0000 ) + 10 for i in range(snake_case__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _lowerCAmelCase : Optional[Any] = floor(random() * c ) + 1 if n != i: self.add_pair(snake_case__ , snake_case__ , 1 ) def a ( self , snake_case__=-2 ): '''simple docstring''' _lowerCAmelCase : Optional[int] = deque() _lowerCAmelCase : Union[str, Any] = [] if s == -2: _lowerCAmelCase : Any = list(self.graph )[0] d.append(snake_case__ ) visited.append(snake_case__ ) while d: _lowerCAmelCase : Any = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def a ( self , snake_case__ ): '''simple docstring''' return len(self.graph[u] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = [] _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : Dict = list(self.graph )[0] stack.append(snake_case__ ) visited.append(snake_case__ ) _lowerCAmelCase : Optional[int] = -2 _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : List[str] = s _lowerCAmelCase : Union[str, Any] = False _lowerCAmelCase : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase : List[Any] = len(snake_case__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase : Union[str, Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase : str = True if len(snake_case__ ) != 0: _lowerCAmelCase : str = stack[len(snake_case__ ) - 1] else: _lowerCAmelCase : List[str] = False indirect_parents.append(snake_case__ ) _lowerCAmelCase : 
List[Any] = s _lowerCAmelCase : int = ss # check if se have reached the starting point if len(snake_case__ ) == 0: return list(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = [] _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : List[Any] = list(self.graph )[0] stack.append(snake_case__ ) visited.append(snake_case__ ) _lowerCAmelCase : Optional[int] = -2 _lowerCAmelCase : Optional[int] = [] _lowerCAmelCase : List[str] = s _lowerCAmelCase : List[Any] = False _lowerCAmelCase : List[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase : List[str] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase : int = len(snake_case__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase : str = True if len(snake_case__ ) != 0: _lowerCAmelCase : List[Any] = stack[len(snake_case__ ) - 1] else: _lowerCAmelCase : Union[str, Any] = False indirect_parents.append(snake_case__ ) _lowerCAmelCase : Optional[Any] = s _lowerCAmelCase : Dict = ss # check if se have reached the starting point if len(snake_case__ ) == 0: return False def a ( self ): '''simple docstring''' return list(self.graph ) def a ( self , snake_case__=-2 , snake_case__=-1 ): '''simple docstring''' _lowerCAmelCase : Optional[int] = time() self.dfs(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = time() return end - begin def a ( self , snake_case__=-2 ): '''simple docstring''' _lowerCAmelCase : List[str] = time() self.bfs(snake_case__ ) _lowerCAmelCase : str = time() return end - begin
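# Illustration (a minimal self-contained sketch, not part of the classes above):
# the BFS helper is a standard queue-driven traversal over the same
# {node: [[weight, neighbor], ...]} adjacency layout used in this file.
from collections import deque as _deque


def _bfs_sketch(graph, start):
    visited = [start]
    queue = _deque([start])
    while queue:
        node = queue.popleft()
        for _weight, neighbor in graph.get(node, []):
            if neighbor not in visited:
                visited.append(neighbor)
                queue.append(neighbor)
    return visited


assert _bfs_sketch({1: [[1, 2], [1, 3]], 2: [[1, 4]], 3: [], 4: []}, 1) == [1, 2, 3, 4]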
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mvp" __magic_name__ = ["past_key_values"] __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Optional[Any] = d_model _lowerCAmelCase : Optional[int] = encoder_ffn_dim _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = encoder_attention_heads _lowerCAmelCase : Any = decoder_ffn_dim _lowerCAmelCase : Optional[Any] = decoder_layers _lowerCAmelCase : int = decoder_attention_heads _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : List[Any] = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Optional[Any] = activation_function _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = encoder_layerdrop _lowerCAmelCase : Union[str, Any] = decoder_layerdrop _lowerCAmelCase : Optional[int] = classifier_dropout _lowerCAmelCase : List[Any] = use_cache _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase : Optional[Any] = use_prompt _lowerCAmelCase : Optional[Any] = prompt_length _lowerCAmelCase : Any = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ): _lowerCAmelCase : Any = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
'''simple docstring''' # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar lowerCAmelCase : str = TypeVar("""T""") class UpperCamelCase__ ( Generic[T] ): """simple docstring""" def __init__( self , snake_case__ = True ): '''simple docstring''' _lowerCAmelCase : dict[T, list[T]] = {} # dictionary of lists _lowerCAmelCase : Any = directed def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(snake_case__ ) self.adj_list[destination_vertex].append(snake_case__ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(snake_case__ ) _lowerCAmelCase : Tuple = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(snake_case__ ) _lowerCAmelCase : int = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: _lowerCAmelCase : Union[str, Any] = [destination_vertex] _lowerCAmelCase : List[Any] = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(snake_case__ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(snake_case__ ) _lowerCAmelCase : Any = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: _lowerCAmelCase : int = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: _lowerCAmelCase : Tuple = [destination_vertex] _lowerCAmelCase : List[str] = [] return self def __repr__( self ): '''simple docstring''' return pformat(self.adj_list )
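# Illustration (a minimal self-contained sketch, not part of the class above):
# the undirected branches boil down to "each endpoint records the other"; with
# collections.defaultdict the same bookkeeping needs no membership checks, which
# is all the if/elif ladder above exists to handle on plain dicts.
from collections import defaultdict as _defaultdict

_adj_list = _defaultdict(list)


def _add_undirected_edge(u, v):
    # Each endpoint records the other endpoint in its adjacency list.
    _adj_list[u].append(v)
    _adj_list[v].append(u)


_add_undirected_edge("a", "b")
_add_undirected_edge("b", "c")
assert _adj_list["b"] == ["a", "c"]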
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
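# Illustration (an added self-contained sketch of the rounding rule used by
# compute_intermediate_size above): the FFN width is 8n/3, scaled by the
# optional multiplier and padded up to a multiple of `multiple_of`.
def _intermediate_size_sketch(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


assert _intermediate_size_sketch(4096) == 1_10_08  # matches the 7B entry in the table above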
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } lowerCAmelCase : Optional[Any] = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } lowerCAmelCase : Any = { """facebook/blenderbot_small-90M""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = BlenderbotSmallTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=snake_case__ , merges=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , ) , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Optional[int] = add_prefix_space def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : List[str] = [self.sep_token_id] _lowerCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
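# Illustration (a minimal self-contained sketch, not part of the tokenizer
# above; the ids 0 and 2 are stand-ins): the first helper wraps one segment as
# bos + ids + eos, and separates an optional second segment with an extra eos.
def _build_inputs_sketch(ids_a, ids_b=None, bos=0, eos=2):
    output = [bos] + ids_a + [eos]
    if ids_b is None:
        return output
    return output + [eos] + ids_b + [eos]


assert _build_inputs_sketch([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]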
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class UpperCamelCase__:
    """simple docstring"""

    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = True
    __magic_name__ = None
    __magic_name__ = 1
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = None

    def a(self):
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
'''simple docstring''' import mpmath # for roots of unity import numpy as np class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__=None , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Optional[int] = list(poly_a or [0] )[:] _lowerCAmelCase : Union[str, Any] = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCAmelCase : int = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _lowerCAmelCase : Dict = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _lowerCAmelCase : Dict = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _lowerCAmelCase : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _lowerCAmelCase : Union[str, Any] = self.__multiply() def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB] # Corner case if len(snake_case__ ) <= 1: return dft[0] # _lowerCAmelCase : Union[str, Any] = self.c_max_length // 2 while next_ncol > 0: _lowerCAmelCase : List[str] = [[] for i in range(snake_case__ )] _lowerCAmelCase : str = self.root**next_ncol # First half of next step _lowerCAmelCase : str = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(snake_case__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _lowerCAmelCase : List[str] = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(snake_case__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _lowerCAmelCase : Optional[int] = new_dft _lowerCAmelCase : Dict = next_ncol // 2 return dft[0] def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.__dft('A' ) _lowerCAmelCase : int = self.__dft('B' ) _lowerCAmelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _lowerCAmelCase : List[Any] = 2 while next_ncol <= self.c_max_length: _lowerCAmelCase : Optional[int] = [[] for i in range(snake_case__ )] _lowerCAmelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCAmelCase : Tuple = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _lowerCAmelCase : Tuple = new_inverse_c next_ncol *= 2 # Unpack _lowerCAmelCase : Dict = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'A = ' + ' + '.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _lowerCAmelCase : Optional[int] = 'B = ' + ' + '.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _lowerCAmelCase : int = 'A*B = ' + ' + '.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product ) ) 
return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
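# Illustration (an added self-contained cross-check, not part of the class
# above; written with numpy because the surrounding class and parameter names
# were mangled): the same product via numpy's FFT.
# (1 + 2x + 3x^2)(3 + 4x) = 3 + 10x + 17x^2 + 12x^3.
import numpy as _np


def _multiply_via_fft(poly_a, poly_b):
    # Pad to the full product length, transform, multiply pointwise, invert.
    n = len(poly_a) + len(poly_b) - 1
    product = _np.fft.ifft(_np.fft.fft(poly_a, n) * _np.fft.fft(poly_b, n))
    return [round(c.real, 8) for c in product]


assert _multiply_via_fft([1, 2, 3], [3, 4]) == [3.0, 10.0, 17.0, 12.0]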
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    """kernels/rwkv/wkv_cuda.cu""",
    """kernels/rwkv/wkv_op.cpp""",
    """kernels/deformable_detr/ms_deform_attn.h""",
    """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
    """models/graphormer/algos_graphormer.pyx""",
]


def test_custom_files_are_present(transformers_path):
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("""transformers""")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / """build/lib/transformers"""
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
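# Usage sketch (added illustration): the helper only needs a Path-like root, so
# it can be exercised against any directory, e.g. a scratch directory where
# none of the custom files exist yet.
def _demo_check_on_empty_dir():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        assert not test_custom_files_are_present(Path(tmp))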
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
25
1
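A minimal sketch of the lazy-import idea behind the ResNet __init__.py above: submodules are imported only on first attribute access. This is a simplification under stated assumptions; transformers' real _LazyModule also handles TYPE_CHECKING, pickling, and missing backends.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported object name to the submodule that defines it.
        self._object_to_module = {
            obj: submodule for submodule, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        submodule = self._object_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the defining submodule lazily, then fetch the object from it.
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)

# Typical wiring, mirroring the record above:
# sys.modules[__name__] = LazyModule(__name__, _import_structure)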
'''simple docstring''' def lowercase (_A , _A ): """simple docstring""" if not (isinstance(_A , _A ) and isinstance(_A , _A )): raise ValueError('longest_common_substring() takes two strings for inputs' ) _lowerCAmelCase : Optional[Any] = len(_A ) _lowerCAmelCase : str = len(_A ) _lowerCAmelCase : str = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : str = 0 for i in range(1 , texta_length + 1 ): for j in range(1 , texta_length + 1 ): if texta[i - 1] == texta[j - 1]: _lowerCAmelCase : List[Any] = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: _lowerCAmelCase : Union[str, Any] = i _lowerCAmelCase : Dict = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
25
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "nat" __magic_name__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Tuple = embed_dim _lowerCAmelCase : Any = depths _lowerCAmelCase : Dict = len(snake_case__ ) _lowerCAmelCase : str = num_heads _lowerCAmelCase : Dict = kernel_size _lowerCAmelCase : Union[str, Any] = mlp_ratio _lowerCAmelCase : int = qkv_bias _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = drop_path_rate _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Tuple = layer_norm_eps _lowerCAmelCase : Dict = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) _lowerCAmelCase : Any = layer_scale_init_value _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
25
1
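A readable restatement of the longest-common-substring DP above, with a quick check. dp[i][j] holds the length of the common suffix of text1[:i] and text2[:j]; the answer is the longest such suffix seen.
def longest_common_substring(text1: str, text2: str) -> str:
    dp = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]
    end, best = 0, 0  # end index in text1 and length of the best match
    for i in range(1, len(text1) + 1):
        for j in range(1, len(text2) + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] > best:
                    end, best = i, dp[i][j]
    return text1[end - best:end]

assert longest_common_substring("xyzabcd", "abcdxyz") == "abcd"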
'''simple docstring''' import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowerCAmelCase : Tuple = logging.getLogger() def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Optional[int] = '\n'.join(_A ) Path(_A ).open('w' ).writelines(_A ) lowerCAmelCase : List[Any] = """patrickvonplaten/t5-tiny-random""" lowerCAmelCase : List[Any] = """sshleifer/bart-tiny-random""" lowerCAmelCase : Optional[Any] = """sshleifer/tiny-mbart""" lowerCAmelCase : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _lowerCAmelCase : Optional[Any] = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _lowerCAmelCase : Any = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case__ , snake_case__ ) _lowerCAmelCase : str = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _lowerCAmelCase : Any = 'translation_en_to_de' if model == T5_TINY else 'summarization' _lowerCAmelCase : List[Any] = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case__ , 'argv' , snake_case__ ): run_generate() assert Path(snake_case__ ).exists() # os.remove(Path(output_file_name)) def a ( self ): '''simple docstring''' self.run_eval_tester(snake_case__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def a ( self , snake_case__ ): '''simple docstring''' self.run_eval_tester(snake_case__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _lowerCAmelCase : Tuple = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _lowerCAmelCase : str = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _lowerCAmelCase : Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) _lowerCAmelCase : List[Any] = str(tmp_dir / 'scores.json' ) _lowerCAmelCase : List[Any] = str(tmp_dir / 'val.target' ) _dump_articles(snake_case__ , text['en'] ) _dump_articles(snake_case__ , text['de'] ) _lowerCAmelCase : Union[str, Any] = 'translation_en_to_de' if model == T5_TINY else 'summarization' _lowerCAmelCase : List[Any] = F'\n run_eval_search.py\n {model}\n {str(snake_case__ )}\n {str(snake_case__ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case__ , 'argv' , snake_case__ ): with CaptureStdout() as cs: run_search() _lowerCAmelCase : int = [' num_beams | length_penalty', model, 'Best score args'] _lowerCAmelCase : Any 
= ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case__ ).exists() os.remove(Path(snake_case__ ) )
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
1
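The sequence layout implemented by build_inputs_with_special_tokens in the tokenizer record above, restated with placeholder ids (0 for <s>, 2 for </s> are assumptions here; real ids come from the vocabulary).
BOS, EOS = 0, 2

def with_special_tokens(ids_a, ids_b=None):
    out = [BOS] + ids_a + [EOS]          # single sequence: <s> A </s>
    if ids_b is None:
        return out
    return out + [EOS] + ids_b + [EOS]   # pair: <s> A </s> </s> B </s>

assert with_special_tokens([7, 8]) == [0, 7, 8, 2]
assert with_special_tokens([7], [9]) == [0, 7, 2, 2, 9, 2]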
'''simple docstring''' from PIL import Image def lowercase (_A , _A ): """simple docstring""" def brightness(_A ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError('level must be between -255.0 (black) and 255.0 (white)' ) return img.point(_A ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 lowerCAmelCase : Optional[int] = change_brightness(img, 1_00) bright_img.save("""image_data/lena_brightness.png""", format="""png""")
25
'''simple docstring''' lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag. lowerCAmelCase : Optional[int] = 1 # The second color of the flag. lowerCAmelCase : int = 2 # The third color of the flag. lowerCAmelCase : Any = (red, white, blue) def lowercase (_A ): """simple docstring""" if not sequence: return [] if len(_A ) == 1: return list(_A ) _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : List[str] = len(_A ) - 1 _lowerCAmelCase : Optional[Any] = 0 while mid <= high: if sequence[mid] == colors[0]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid] high -= 1 else: _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values' raise ValueError(_A ) return sequence if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip() lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")] print(F'''{dutch_national_flag_sort(unsorted)}''')
25
1
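The one-pass three-way partition demonstrated by the dutch-flag sort above, restated with low/mid/high pointers and a worked check.
def dnf_sort(seq, colors=(0, 1, 2)):
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == colors[0]:
            seq[low], seq[mid] = seq[mid], seq[low]  # move 0s to the front
            low += 1
            mid += 1
        elif seq[mid] == colors[1]:
            mid += 1                                  # 1s stay in the middle
        else:
            seq[mid], seq[high] = seq[high], seq[mid] # move 2s to the back
            high -= 1
    return seq

assert dnf_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]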
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = BartphoTokenizer __magic_name__ = False __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : Any = ['▁This', '▁is', '▁a', '▁t', 'est'] _lowerCAmelCase : Optional[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : Tuple = {'unk_token': '<unk>'} _lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] ) with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n' ) _lowerCAmelCase : List[Any] = BartphoTokenizer(snake_case__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = 'This is a là test' _lowerCAmelCase : Optional[int] = 'This is a<unk><unk> test' return input_text, output_text def a ( self ): '''simple docstring''' _lowerCAmelCase : int = BartphoTokenizer(snake_case__ , self.monolingual_vocab_file , **self.special_tokens_map ) _lowerCAmelCase : Any = 'This is a là test' _lowerCAmelCase : Optional[Any] = '▁This ▁is ▁a ▁l à ▁t est'.split() _lowerCAmelCase : List[Any] = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[Any] = tokens + [tokenizer.unk_token] _lowerCAmelCase : Optional[Any] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
25
'''simple docstring''' def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1] _lowerCAmelCase : int = 6 _lowerCAmelCase : Dict = 1 _lowerCAmelCase : Optional[int] = 1_9_0_1 _lowerCAmelCase : Optional[Any] = 0 while year < 2_0_0_1: day += 7 if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] elif day > 2_9 and month == 2: month += 1 _lowerCAmelCase : List[str] = day - 2_9 else: if day > days_per_month[month - 1]: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] if month > 1_2: year += 1 _lowerCAmelCase : Optional[int] = 1 if year < 2_0_0_1 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
25
1
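An independent cross-check of the calendar walk above (Project Euler 19) using only the standard library: Sundays falling on the first of a month in 1901-2000.
from datetime import date

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6  # Monday=0 ... Sunday=6
)
assert sundays == 171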
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "roberta" def __init__( self , snake_case__=5_0265 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) _lowerCAmelCase : List[str] = vocab_size _lowerCAmelCase : Optional[int] = hidden_size _lowerCAmelCase : Union[str, Any] = num_hidden_layers _lowerCAmelCase : str = num_attention_heads _lowerCAmelCase : int = hidden_act _lowerCAmelCase : int = intermediate_size _lowerCAmelCase : Tuple = hidden_dropout_prob _lowerCAmelCase : Optional[int] = attention_probs_dropout_prob _lowerCAmelCase : Union[str, Any] = max_position_embeddings _lowerCAmelCase : str = type_vocab_size _lowerCAmelCase : Any = initializer_range _lowerCAmelCase : str = layer_norm_eps _lowerCAmelCase : Optional[int] = position_embedding_type _lowerCAmelCase : Optional[Any] = use_cache _lowerCAmelCase : Dict = classifier_dropout class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def a ( self ): '''simple docstring''' if self.task == "multiple-choice": _lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
25
'''simple docstring''' def lowercase (_A = 1_0_0_0_0_0_0 ): """simple docstring""" _lowerCAmelCase : Any = set(range(3 , _A , 2 ) ) primes.add(2 ) for p in range(3 , _A , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , _A , _A ) ) ) _lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )] for p in primes: for n in range(_A , limit + 1 , _A ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
25
1
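A brute-force sanity check for the sieve-based totient sum above (Project Euler 72): counting reduced proper fractions n/d with d up to a small limit directly.
from math import gcd

def count_reduced_fractions(limit: int) -> int:
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

# sum(phi(d) for d in 2..10) = 1+2+2+4+2+6+4+6+4 = 31
assert count_reduced_fractions(10) == 31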
'''simple docstring''' import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib lowerCAmelCase : Union[str, Any] = { """debug""": logging.DEBUG, """info""": logging.INFO, """warning""": logging.WARNING, """error""": logging.ERROR, """critical""": logging.CRITICAL, } lowerCAmelCase : Optional[int] = logging.WARNING def lowercase (): """simple docstring""" _lowerCAmelCase : Any = os.getenv('DATASETS_VERBOSITY' , _A ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'Unknown option DATASETS_VERBOSITY={env_level_str}, ' f'has to be one of: { ", ".join(log_levels.keys() ) }' ) return _default_log_level def lowercase (): """simple docstring""" return __name__.split('.' )[0] def lowercase (): """simple docstring""" return logging.getLogger(_get_library_name() ) def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowercase (): """simple docstring""" _lowerCAmelCase : Any = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowercase (_A = None ): """simple docstring""" if name is None: _lowerCAmelCase : List[Any] = _get_library_name() return logging.getLogger(_A ) def lowercase (): """simple docstring""" return _get_library_root_logger().getEffectiveLevel() def lowercase (_A ): """simple docstring""" _get_library_root_logger().setLevel(_A ) def lowercase (): """simple docstring""" return set_verbosity(_A ) def lowercase (): """simple docstring""" return set_verbosity(_A ) def lowercase (): """simple docstring""" return set_verbosity(_A ) def lowercase (): """simple docstring""" return set_verbosity(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : List[str] = False def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[Any] = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class UpperCamelCase__ : """simple docstring""" def __init__( self , *snake_case__ , **snake_case__ ): # pylint: disable=unused-argument '''simple docstring''' _lowerCAmelCase : Dict = args[0] if args else None def __iter__( self ): '''simple docstring''' return iter(self._iterator ) def __getattr__( self , snake_case__ ): '''simple docstring''' def empty_fn(*snake_case__ , **snake_case__ ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): '''simple docstring''' return self def __exit__( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return lowerCAmelCase : str = True class UpperCamelCase__ : """simple docstring""" def __call__( self , *snake_case__ , snake_case__=False , **snake_case__ ): '''simple docstring''' if _tqdm_active and not disable: return tqdm_lib.tqdm(*snake_case__ , **snake_case__ ) else: return EmptyTqdm(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() lowerCAmelCase : Dict = _tqdm_cls() def lowercase (): """simple docstring""" global _tqdm_active return bool(_tqdm_active ) def lowercase (): """simple docstring""" global _tqdm_active 
_lowerCAmelCase : Optional[int] = True def lowercase (): """simple docstring""" global _tqdm_active _lowerCAmelCase : Optional[int] = False
25
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
25
1
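The ordering rule used by sort_objects in the isort script above, restated as a sketch: uppercase constants first, then CamelCase classes, then lowercase functions, each group sorted case- and underscore-insensitively.
def sort_names(names):
    key = lambda n: n.lower().replace("_", "")
    constants = sorted((n for n in names if n.isupper()), key=key)
    classes = sorted((n for n in names if n[0].isupper() and not n.isupper()), key=key)
    functions = sorted((n for n in names if not n[0].isupper()), key=key)
    return constants + classes + functions

assert sort_names(["load_model", "MODEL_MAP", "AutoModel", "CONFIG"]) == [
    "CONFIG", "MODEL_MAP", "AutoModel", "load_model",
]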
'''simple docstring''' from __future__ import annotations import math def lowercase (_A ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[str] = str(_A ) _lowerCAmelCase : Any = [n] for i in range(1 , len(_A ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase (_A ): """simple docstring""" if len(str(_A ) ) > 3: if not is_prime(int(str(_A )[-3:] ) ) or not is_prime(int(str(_A )[:3] ) ): return False return True def lowercase (_A = 1_1 ): """simple docstring""" _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Optional[int] = 1_3 while len(_A ) != count: if validate(_A ): _lowerCAmelCase : Any = list_truncated_nums(_A ) if all(is_prime(_A ) for i in list_nums ): list_truncated_primes.append(_A ) num += 2 return list_truncated_primes def lowercase (): """simple docstring""" return sum(compute_truncated_primes(1_1 ) ) if __name__ == "__main__": print(F'''{sum(compute_truncated_primes(11)) = }''')
25
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
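For reference, the eleven left- and right-truncatable primes that compute_truncated_primes above searches for (Project Euler 37), and their sum.
TRUNCATABLE_PRIMES = [23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397]
assert sum(TRUNCATABLE_PRIMES) == 748317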
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
25
'''simple docstring''' from __future__ import annotations from typing import Any def lowercase (_A ): """simple docstring""" if not postfix_notation: return 0 _lowerCAmelCase : int = {'+', '-', '*', '/'} _lowerCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: _lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_A ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
25
1
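A worked example for the postfix evaluator above: "2 3 + 4 *" means (2 + 3) * 4. Operands are pushed; each operator pops b then a and pushes the result (division is simplified here; the record floors toward zero).
tokens = ["2", "3", "+", "4", "*"]  # postfix for (2 + 3) * 4
stack = []
for token in tokens:
    if token in {"+", "-", "*", "/"}:
        b, a = stack.pop(), stack.pop()  # note the pop order: b first, then a
        if token == "+":
            stack.append(a + b)
        elif token == "-":
            stack.append(a - b)
        elif token == "*":
            stack.append(a * b)
        else:
            stack.append(a // b)
    else:
        stack.append(int(token))
assert stack == [20]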
'''simple docstring''' from string import ascii_uppercase lowerCAmelCase : Any = {char: i for i, char in enumerate(ascii_uppercase)} lowerCAmelCase : Union[str, Any] = dict(enumerate(ascii_uppercase)) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : int = len(_A ) _lowerCAmelCase : str = 0 while True: if x == i: _lowerCAmelCase : Dict = 0 if len(_A ) == len(_A ): break key += key[i] i += 1 return key def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = '' _lowerCAmelCase : Any = 0 for letter in message: if letter == " ": cipher_text += " " else: _lowerCAmelCase : List[str] = (dicta[letter] - dicta[key_new[i]]) % 2_6 i += 1 cipher_text += dicta[x] return cipher_text def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : str = '' _lowerCAmelCase : List[str] = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: _lowerCAmelCase : Optional[int] = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6 i += 1 or_txt += dicta[x] return or_txt def lowercase (): """simple docstring""" _lowerCAmelCase : str = 'THE GERMAN ATTACK' _lowerCAmelCase : int = 'SECRET' _lowerCAmelCase : str = generate_key(_A , _A ) _lowerCAmelCase : int = cipher_text(_A , _A ) print(f'Encrypted Text = {s}' ) print(f'Original Text = {original_text(_A , _A )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
25
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mobilenet_v2" def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Union[str, Any] = image_size _lowerCAmelCase : List[Any] = depth_multiplier _lowerCAmelCase : List[Any] = depth_divisible_by _lowerCAmelCase : Optional[Any] = min_depth _lowerCAmelCase : str = expand_ratio _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = first_layer_is_expansion _lowerCAmelCase : int = finegrained_output _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = tf_padding _lowerCAmelCase : Optional[int] = classifier_dropout_prob _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : str = semantic_loss_ignore_index class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = version.parse("1.11" ) @property def a ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' return 1E-4
25
1
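A round-trip sketch for the keyed cipher above: decryption adds back exactly what encryption subtracted (mod 26), so decrypt(encrypt(m)) == m for uppercase messages. The modular key indexing here is a simplification of the record's generate_key expansion.
from string import ascii_uppercase

C2I = {c: i for i, c in enumerate(ascii_uppercase)}
I2C = dict(enumerate(ascii_uppercase))

def shift(message: str, key: str, sign: int) -> str:
    out, i = [], 0
    for ch in message:
        if ch == " ":
            out.append(" ")  # spaces pass through and do not consume key letters
        else:
            out.append(I2C[(C2I[ch] + sign * C2I[key[i % len(key)]]) % 26])
            i += 1
    return "".join(out)

msg, key = "THE GERMAN ATTACK", "SECRET"
assert shift(shift(msg, key, -1), key, +1) == msg  # encrypt then decrypt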
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "nat" __magic_name__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Tuple = embed_dim _lowerCAmelCase : Any = depths _lowerCAmelCase : Dict = len(snake_case__ ) _lowerCAmelCase : str = num_heads _lowerCAmelCase : Dict = kernel_size _lowerCAmelCase : Union[str, Any] = mlp_ratio _lowerCAmelCase : int = qkv_bias _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = drop_path_rate _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Tuple = layer_norm_eps _lowerCAmelCase : Dict = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) _lowerCAmelCase : Any = layer_scale_init_value _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
25
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
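The resolution order exercised by the FeaturesManager tests above, restated as a sketch: an explicit framework wins, then the format of a local checkpoint, then whichever of torch/tf is installed (torch preferred), else an error. Names here are hypothetical, not the transformers API.
def resolve_framework(explicit=None, local_format=None, has_torch=True, has_tf=True):
    if explicit is not None:
        return explicit                  # user-provided framework always wins
    if local_format in ("pt", "tf"):
        return local_format              # infer from the local checkpoint format
    if has_torch:
        return "pt"                      # prefer PyTorch when both are installed
    if has_tf:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")

assert resolve_framework(explicit="tf") == "tf"
assert resolve_framework(has_torch=False) == "tf"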
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : List[str] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "efficientformer" def __init__( self , snake_case__ = [3, 2, 6, 4] , snake_case__ = [48, 96, 224, 448] , snake_case__ = [True, True, True, True] , snake_case__ = 448 , snake_case__ = 32 , snake_case__ = 4 , snake_case__ = 7 , snake_case__ = 5 , snake_case__ = 8 , snake_case__ = 4 , snake_case__ = 0.0 , snake_case__ = 16 , snake_case__ = 3 , snake_case__ = 3 , snake_case__ = 3 , snake_case__ = 2 , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = 1 , snake_case__ = True , snake_case__ = True , snake_case__ = 1E-5 , snake_case__ = "gelu" , snake_case__ = 0.02 , snake_case__ = 1E-12 , snake_case__ = 224 , snake_case__ = 1E-05 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Optional[int] = hidden_act _lowerCAmelCase : Tuple = hidden_dropout_prob _lowerCAmelCase : Dict = hidden_sizes _lowerCAmelCase : Tuple = num_hidden_layers _lowerCAmelCase : Dict = num_attention_heads _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : int = layer_norm_eps _lowerCAmelCase : List[str] = patch_size _lowerCAmelCase : int = num_channels _lowerCAmelCase : List[Any] = depths _lowerCAmelCase : Dict = mlp_expansion_ratio _lowerCAmelCase : Tuple = downsamples _lowerCAmelCase : Optional[int] = dim _lowerCAmelCase : str = key_dim _lowerCAmelCase : Any = attention_ratio _lowerCAmelCase : Any = resolution _lowerCAmelCase : str = pool_size _lowerCAmelCase : Optional[int] = downsample_patch_size _lowerCAmelCase : Optional[int] = downsample_stride _lowerCAmelCase : Dict = downsample_pad _lowerCAmelCase : Any = drop_path_rate _lowerCAmelCase : Union[str, Any] = num_metaad_blocks _lowerCAmelCase : Any = distillation _lowerCAmelCase : Tuple = use_layer_scale _lowerCAmelCase : Optional[int] = layer_scale_init_value _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : Tuple = batch_norm_eps
25
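# Minimal instantiation sketch for the config defined above, assuming a
# transformers release that exposes it as EfficientFormerConfig.
from transformers import EfficientFormerConfig

config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
print(config.model_type, config.num_hidden_layers)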
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
25
1
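# Translation-tokenization sketch for the fast NLLB tokenizer above; the
# language codes come from FAIRSEQ_LANGUAGE_CODES and the checkpoint download
# is assumed.
from transformers import NllbTokenizerFast

tokenizer = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
# The source language code is inserted as a special prefix (or suffix in
# legacy mode) around the encoded sentence.
inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
print(inputs["input_ids"][0][:5])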
'''simple docstring'''


def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal, 0 otherwise (logical XNOR)."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
25
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
25
1
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase (): """simple docstring""" raise RuntimeError('CUDA out of memory.' ) class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__() _lowerCAmelCase : List[str] = nn.Linear(3 , 4 ) _lowerCAmelCase : str = nn.BatchNormad(4 ) _lowerCAmelCase : int = nn.Linear(4 , 5 ) def a ( self , snake_case__ ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(snake_case__ ): nonlocal batch_sizes batch_sizes.append(snake_case__ ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(snake_case__ , [128, 64, 32, 16, 8] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(snake_case__ , snake_case__ ): nonlocal batch_sizes batch_sizes.append(snake_case__ ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga _lowerCAmelCase , _lowerCAmelCase : str = mock_training_loop_function('hello' ) self.assertListEqual(snake_case__ , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, 'hello'] ) def a ( self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(snake_case__ ): pass with self.assertRaises(snake_case__ ) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] ) def a ( self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(snake_case__ ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(snake_case__ ) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] ) def a ( self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(snake_case__ , snake_case__ , snake_case__ ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(snake_case__ ) as cm: mock_training_loop_function(128 , 'hello' , 'world' ) self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] ) self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] ) def a ( self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(snake_case__ ): raise ValueError('Oops, we had an error!' ) with self.assertRaises(snake_case__ ) as cm: mock_training_loop_function() self.assertIn('Oops, we had an error!' , cm.exception.args[0] ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = torch.cuda.memory_allocated() _lowerCAmelCase : Union[str, Any] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , snake_case__ ) _lowerCAmelCase : List[Any] = release_memory(snake_case__ ) self.assertEqual(torch.cuda.memory_allocated() , snake_case__ )
25
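# Usage sketch for the decorator tested above: on a RuntimeError containing
# "CUDA out of memory.", it retries the wrapped function with a halved batch
# size. The training body here is a placeholder.
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")

train()  # the decorator injects batch_size; do not pass it yourself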
'''simple docstring'''


def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: longest palindromic substring in linear time."""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of the previous furthest ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
1
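# Example calls for the Manacher implementation above; the expected outputs
# are the longest palindromic substrings.
print(palindromic_string("abababa"))           # abababa
print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg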
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = CodeGenTokenizer __magic_name__ = CodeGenTokenizerFast __magic_name__ = True __magic_name__ = {"add_prefix_space": True} __magic_name__ = False def a ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _lowerCAmelCase : Optional[int] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] _lowerCAmelCase : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _lowerCAmelCase : str = {'unk_token': '<unk>'} _lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case__ ) ) def a ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = 'lower newer' _lowerCAmelCase : List[str] = 'lower newer' return input_text, output_text def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _lowerCAmelCase : Optional[int] = 'lower newer' _lowerCAmelCase : int = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] _lowerCAmelCase : Dict = tokenizer.tokenize(snake_case__ , add_prefix_space=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[Any] = tokens + [tokenizer.unk_token] _lowerCAmelCase : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return _lowerCAmelCase : Union[str, Any] = self.get_tokenizer() _lowerCAmelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=snake_case__ ) _lowerCAmelCase : Optional[Any] = 'lower newer' # Testing tokenization _lowerCAmelCase : Optional[Any] = tokenizer.tokenize(snake_case__ , add_prefix_space=snake_case__ ) _lowerCAmelCase : List[str] = rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) # Testing conversion to ids without special tokens _lowerCAmelCase : List[str] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ ) _lowerCAmelCase : Tuple = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) 
self.assertListEqual(snake_case__ , snake_case__ ) # Testing conversion to ids with special tokens _lowerCAmelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=snake_case__ ) _lowerCAmelCase : Dict = tokenizer.encode(snake_case__ , add_prefix_space=snake_case__ ) _lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) # Testing the unknown token _lowerCAmelCase : Union[str, Any] = tokens + [rust_tokenizer.unk_token] _lowerCAmelCase : Any = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' pass def a ( self , snake_case__=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) # Simple input _lowerCAmelCase : Optional[int] = 'This is a simple input' _lowerCAmelCase : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2'] _lowerCAmelCase : List[str] = ('This is a simple input', 'This is a pair') _lowerCAmelCase : Any = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Simple input self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Simple input self.assertRaises( snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' , ) # Pair input self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Pair input self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Pair input self.assertRaises( snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input _lowerCAmelCase : List[Any] = 'This is a simple input' _lowerCAmelCase : List[str] = ['This is a simple input looooooooong', 'This is a simple input'] _lowerCAmelCase : List[str] = ('This is a simple input', 'This is a pair') _lowerCAmelCase : List[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] _lowerCAmelCase : int = tokenizer.pad_token_id _lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ , padding='max_length' , max_length=30 , return_tensors='np' ) _lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ , padding=snake_case__ , truncate=snake_case__ , return_tensors='np' ) _lowerCAmelCase : Optional[int] = tokenizer(*snake_case__ , padding='max_length' , max_length=60 , return_tensors='np' ) _lowerCAmelCase : Tuple = tokenizer(snake_case__ , padding=snake_case__ , truncate=snake_case__ , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding 
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = '$$$' _lowerCAmelCase : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case__ , add_bos_token=snake_case__ ) _lowerCAmelCase : Optional[int] = 'This is a simple input' _lowerCAmelCase : Optional[int] = ['This is a simple input 1', 'This is a simple input 2'] _lowerCAmelCase : Any = tokenizer.bos_token_id _lowerCAmelCase : str = tokenizer(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer(snake_case__ ) self.assertEqual(out_s.input_ids[0] , snake_case__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _lowerCAmelCase : int = tokenizer.decode(out_s.input_ids ) _lowerCAmelCase : List[str] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , snake_case__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) _lowerCAmelCase : Any = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' _lowerCAmelCase : int = '\nif len_a > len_b: result = a\nelse: result = b' _lowerCAmelCase : Optional[int] = tokenizer.encode(snake_case__ ) _lowerCAmelCase : Any = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] _lowerCAmelCase : Tuple = tokenizer.decode(snake_case__ , truncate_before_pattern=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' pass
25
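# Sketch of the pattern-based decode truncation the last test above relies on
# (checkpoint download assumed); the patterns are regular expressions.
import re
from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tokenizer.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
# Decoding stops before the first match, here the trailing comment marker.
print(tokenizer.decode(ids, truncate_before_pattern=["^#", re.escape("<|endoftext|>")]))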
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
25
1
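# Sketch of the kwargs-handler pattern tested above; fp16 mixed precision
# requires a CUDA-capable machine.
from accelerate import Accelerator, GradScalerKwargs

scaler_kwargs = GradScalerKwargs(init_scale=1024, growth_factor=2)
accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_kwargs])
print(accelerator.scaler._init_scale, accelerator.scaler._growth_factor)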
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
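# Quick usage of the estimators above; with 10**5 samples the pi estimate is
# typically within a few hundredths of math.pi.
pi_estimator(10**5)
area_under_line_estimator_check(10**5)
pi_estimator_using_area_under_curve(10**5)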
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "trajectory_transformer" __magic_name__ = ["past_key_values"] __magic_name__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = action_weight _lowerCAmelCase : Optional[int] = reward_weight _lowerCAmelCase : Union[str, Any] = value_weight _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Tuple = block_size _lowerCAmelCase : List[Any] = action_dim _lowerCAmelCase : List[Any] = observation_dim _lowerCAmelCase : Union[str, Any] = transition_dim _lowerCAmelCase : Tuple = learning_rate _lowerCAmelCase : int = n_layer _lowerCAmelCase : Any = n_head _lowerCAmelCase : Tuple = n_embd _lowerCAmelCase : Optional[Any] = embd_pdrop _lowerCAmelCase : Union[str, Any] = attn_pdrop _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range _lowerCAmelCase : List[Any] = use_cache super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase : Dict = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
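# The __init__ above follows the lazy-import convention: submodules load on
# first attribute access via _LazyModule.__getattr__, so heavy torch imports
# are deferred. Observable behavior, assuming a transformers release that
# includes Mega:
import transformers.models.mega as mega

print(mega.MegaConfig.model_type)  # triggers the real import of configuration_mega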
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
25
1
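# Translation-tokenization sketch matching the integration test above
# (sentencepiece and the checkpoint download are assumed).
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(batch["input_ids"][0][0])  # 250004, the en_XX language code prefix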
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "linear" __magic_name__ = "cosine" __magic_name__ = "cosine_with_restarts" __magic_name__ = "polynomial" __magic_name__ = "constant" __magic_name__ = "constant_with_warmup" __magic_name__ = "piecewise_constant" def lowercase (_A , _A = -1 ): """simple docstring""" return LambdaLR(_A , lambda _A : 1 , last_epoch=_A ) def lowercase (_A , _A , _A = -1 ): """simple docstring""" def lr_lambda(_A ): if current_step < num_warmup_steps: return float(_A ) / float(max(1.0 , _A ) ) return 1.0 return LambdaLR(_A , _A , last_epoch=_A ) def lowercase (_A , _A , _A = -1 ): """simple docstring""" _lowerCAmelCase : Tuple = {} _lowerCAmelCase : List[Any] = step_rules.split(',' ) for rule_str in rule_list[:-1]: _lowerCAmelCase , _lowerCAmelCase : int = rule_str.split(':' ) _lowerCAmelCase : int = int(_A ) _lowerCAmelCase : str = float(_A ) _lowerCAmelCase : Optional[Any] = value _lowerCAmelCase : List[Any] = float(rule_list[-1] ) def create_rules_function(_A , _A ): def rule_func(_A ) -> float: _lowerCAmelCase : List[Any] = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(_A ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func _lowerCAmelCase : Dict = create_rules_function(_A , _A ) return LambdaLR(_A , _A , last_epoch=_A ) def lowercase (_A , _A , _A , _A=-1 ): """simple docstring""" def lr_lambda(_A ): if current_step < num_warmup_steps: return float(_A ) / float(max(1 , _A ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(_A , _A , _A ) def lowercase (_A , _A , _A , _A = 0.5 , _A = -1 ): """simple docstring""" def lr_lambda(_A ): if current_step < num_warmup_steps: return float(_A ) / float(max(1 , _A ) ) _lowerCAmelCase : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_A ) * 2.0 * progress )) ) return LambdaLR(_A , _A , _A ) def lowercase (_A , _A , _A , _A = 1 , _A = -1 ): """simple docstring""" def lr_lambda(_A ): if current_step < num_warmup_steps: return float(_A ) / float(max(1 , _A ) ) _lowerCAmelCase : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_A ) * progress) % 1.0) )) ) return LambdaLR(_A , _A , _A ) def lowercase (_A , _A , _A , _A=1E-7 , _A=1.0 , _A=-1 ): """simple docstring""" _lowerCAmelCase : Optional[int] = optimizer.defaults['lr'] if not (lr_init > lr_end): raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' ) def lr_lambda(_A ): if current_step < num_warmup_steps: return float(_A ) / float(max(1 , _A ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: _lowerCAmelCase : List[str] = lr_init - lr_end _lowerCAmelCase : Optional[int] = num_training_steps - num_warmup_steps _lowerCAmelCase : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps _lowerCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end return decay / lr_init # 
as LambdaLR multiplies by lr_init return LambdaLR(_A , _A , _A ) lowerCAmelCase : List[str] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def lowercase (_A , _A , _A = None , _A = None , _A = None , _A = 1 , _A = 1.0 , _A = -1 , ): """simple docstring""" _lowerCAmelCase : str = SchedulerType(_A ) _lowerCAmelCase : Optional[Any] = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(_A , last_epoch=_A ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(_A , step_rules=_A , last_epoch=_A ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(_A , num_warmup_steps=_A , last_epoch=_A ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( _A , num_warmup_steps=_A , num_training_steps=_A , num_cycles=_A , last_epoch=_A , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( _A , num_warmup_steps=_A , num_training_steps=_A , power=_A , last_epoch=_A , ) return schedule_func( _A , num_warmup_steps=_A , num_training_steps=_A , last_epoch=_A )
25
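# Wiring one of the schedules above to a torch optimizer; the import path
# assumes the module is exposed the way diffusers.optimization exposes it,
# which matches the SchedulerType values defined here.
import torch
from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
for _ in range(1000):
    optimizer.step()
    scheduler.step()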
'''simple docstring'''
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    # Candidates are differences of consecutive cubes:
    # (n + 1) ** 3 - n ** 3 == 3 * n**2 + 3 * n + 1, i.e. 7, 19, 37, 61, ...
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
25
1
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : int = tempfile.mkdtemp() _lowerCAmelCase : Dict = BlipImageProcessor() _lowerCAmelCase : Any = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) _lowerCAmelCase : str = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' ) _lowerCAmelCase : Tuple = InstructBlipProcessor(snake_case__ , snake_case__ , snake_case__ ) processor.save_pretrained(self.tmpdirname ) def a ( self , **snake_case__ ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer def a ( self , **snake_case__ ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor def a ( self , **snake_case__ ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).qformer_tokenizer def a ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) _lowerCAmelCase : Optional[Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) self.assertIsInstance(processor.qformer_tokenizer , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.get_image_processor() _lowerCAmelCase : int = self.get_tokenizer() _lowerCAmelCase : List[str] = self.get_qformer_tokenizer() _lowerCAmelCase : Tuple = InstructBlipProcessor( tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ ) _lowerCAmelCase : Dict = self.prepare_image_inputs() _lowerCAmelCase : List[str] = image_processor(snake_case__ , return_tensors='np' ) _lowerCAmelCase : Union[str, Any] = processor(images=snake_case__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.get_image_processor() _lowerCAmelCase : 
List[str] = self.get_tokenizer() _lowerCAmelCase : Union[str, Any] = self.get_qformer_tokenizer() _lowerCAmelCase : List[str] = InstructBlipProcessor( tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ ) _lowerCAmelCase : List[str] = 'lower newer' _lowerCAmelCase : Tuple = processor(text=snake_case__ ) _lowerCAmelCase : Union[str, Any] = tokenizer(snake_case__ , return_token_type_ids=snake_case__ ) _lowerCAmelCase : Dict = qformer_tokenizer(snake_case__ , return_token_type_ids=snake_case__ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.get_image_processor() _lowerCAmelCase : Dict = self.get_tokenizer() _lowerCAmelCase : Tuple = self.get_qformer_tokenizer() _lowerCAmelCase : Optional[Any] = InstructBlipProcessor( tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ ) _lowerCAmelCase : List[Any] = 'lower newer' _lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCAmelCase : List[Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.get_image_processor() _lowerCAmelCase : Optional[Any] = self.get_tokenizer() _lowerCAmelCase : Tuple = self.get_qformer_tokenizer() _lowerCAmelCase : str = InstructBlipProcessor( tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ ) _lowerCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase : Tuple = processor.batch_decode(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.get_image_processor() _lowerCAmelCase : Any = self.get_tokenizer() _lowerCAmelCase : Optional[Any] = self.get_qformer_tokenizer() _lowerCAmelCase : List[str] = InstructBlipProcessor( tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ ) _lowerCAmelCase : Optional[int] = 'lower newer' _lowerCAmelCase : str = self.prepare_image_inputs() _lowerCAmelCase : Optional[int] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
25
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mvp" __magic_name__ = ["past_key_values"] __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Optional[Any] = d_model _lowerCAmelCase : Optional[int] = encoder_ffn_dim _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = encoder_attention_heads _lowerCAmelCase : Any = decoder_ffn_dim _lowerCAmelCase : Optional[Any] = decoder_layers _lowerCAmelCase : int = decoder_attention_heads _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : List[Any] = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Optional[Any] = activation_function _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = encoder_layerdrop _lowerCAmelCase : Union[str, Any] = decoder_layerdrop _lowerCAmelCase : Optional[int] = classifier_dropout _lowerCAmelCase : List[Any] = use_cache _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase : Optional[Any] = use_prompt _lowerCAmelCase : Optional[Any] = prompt_length _lowerCAmelCase : Any = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ): _lowerCAmelCase : Any = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
25
1
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=2 , snake_case__=3 , snake_case__=4 , snake_case__=2 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=36 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=6 , snake_case__=6 , snake_case__=3 , snake_case__=4 , snake_case__=None , snake_case__=1000 , ): '''simple docstring''' _lowerCAmelCase : Optional[int] = parent _lowerCAmelCase : Optional[Any] = batch_size _lowerCAmelCase : Tuple = num_channels _lowerCAmelCase : str = image_size _lowerCAmelCase : List[Any] = patch_size _lowerCAmelCase : int = is_training _lowerCAmelCase : Optional[Any] = use_input_mask _lowerCAmelCase : Dict = use_token_type_ids _lowerCAmelCase : int = use_labels _lowerCAmelCase : Union[str, Any] = vocab_size _lowerCAmelCase : Any = hidden_size _lowerCAmelCase : Any = num_hidden_layers _lowerCAmelCase : List[str] = num_attention_heads _lowerCAmelCase : int = intermediate_size _lowerCAmelCase : Dict = hidden_act _lowerCAmelCase : List[Any] = hidden_dropout_prob _lowerCAmelCase : int = attention_probs_dropout_prob _lowerCAmelCase : List[Any] = max_position_embeddings _lowerCAmelCase : Tuple = type_vocab_size _lowerCAmelCase : int = type_sequence_label_size _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Dict = coordinate_size _lowerCAmelCase : Tuple = shape_size _lowerCAmelCase : List[str] = num_labels _lowerCAmelCase : Optional[int] = num_choices _lowerCAmelCase : Optional[int] = scope _lowerCAmelCase : List[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _lowerCAmelCase : List[Any] = text_seq_length _lowerCAmelCase : Union[str, Any] = (image_size // patch_size) ** 2 + 1 _lowerCAmelCase : Tuple = self.text_seq_length + self.image_seq_length def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) _lowerCAmelCase : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 
1]: _lowerCAmelCase : int = bbox[i, j, 3] _lowerCAmelCase : List[str] = bbox[i, j, 1] _lowerCAmelCase : Tuple = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCAmelCase : Union[str, Any] = bbox[i, j, 2] _lowerCAmelCase : Any = bbox[i, j, 0] _lowerCAmelCase : str = tmp_coordinate _lowerCAmelCase : Tuple = tf.constant(snake_case__ ) _lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Dict = None if self.use_input_mask: _lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.text_seq_length] ) _lowerCAmelCase : Any = None if self.use_token_type_ids: _lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _lowerCAmelCase : Tuple = None _lowerCAmelCase : List[Any] = None if self.use_labels: _lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _lowerCAmelCase : Dict = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = TFLayoutLMvaModel(config=snake_case__ ) # text + image _lowerCAmelCase : Tuple = model(snake_case__ , pixel_values=snake_case__ , training=snake_case__ ) _lowerCAmelCase : Union[str, Any] = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , training=snake_case__ , ) _lowerCAmelCase : Optional[int] = model(snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _lowerCAmelCase : Any = model(snake_case__ , training=snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _lowerCAmelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.num_labels _lowerCAmelCase : Any = TFLayoutLMvaForSequenceClassification(config=snake_case__ ) _lowerCAmelCase : Optional[int] = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , 
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.num_labels _lowerCAmelCase : Dict = TFLayoutLMvaForTokenClassification(config=snake_case__ ) _lowerCAmelCase : Optional[int] = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = 2 _lowerCAmelCase : Tuple = TFLayoutLMvaForQuestionAnswering(config=snake_case__ ) _lowerCAmelCase : Optional[int] = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , training=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.prepare_config_and_inputs() ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[Any] = config_and_inputs _lowerCAmelCase : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) __magic_name__ = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return True def a ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = copy.deepcopy(snake_case__ ) if model_class in get_values(snake_case__ ): _lowerCAmelCase : Dict = { k: tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(snake_case__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(snake_case__ ): _lowerCAmelCase : List[str] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case__ ): _lowerCAmelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) _lowerCAmelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case__ ): _lowerCAmelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case__ ): _lowerCAmelCase : List[Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def a ( self ): '''simple 
docstring''' _lowerCAmelCase : Dict = TFLayoutLMvaModelTester(self ) _lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Dict = model_class(snake_case__ ) if getattr(snake_case__ , 'hf_compute_loss' , snake_case__ ): # The number of elements in the loss should be the same as the number of elements in the label _lowerCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : Tuple = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=snake_case__ )[0] ] _lowerCAmelCase : str = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs _lowerCAmelCase : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : Optional[int] = prepared_for_class.pop('input_ids' ) _lowerCAmelCase : Any = model(snake_case__ , **snake_case__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions _lowerCAmelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : Optional[int] = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: _lowerCAmelCase : Optional[Any] = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: _lowerCAmelCase : int = -100 _lowerCAmelCase : List[str] = tf.convert_to_tensor(snake_case__ ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ , **snake_case__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict _lowerCAmelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : Dict = model(snake_case__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple _lowerCAmelCase : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) # Get keys that were added with the _prepare_for_class function _lowerCAmelCase : Optional[int] = prepared_for_class.keys() - inputs_dict.keys() _lowerCAmelCase : List[str] = inspect.signature(model.call ).parameters _lowerCAmelCase : Dict = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple _lowerCAmelCase : Union[str, Any] = {0: 'input_ids'} for label_key in label_keys: _lowerCAmelCase : Any = signature_names.index(snake_case__ ) _lowerCAmelCase : Any = label_key _lowerCAmelCase : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple _lowerCAmelCase : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: _lowerCAmelCase : Union[str, Any] = prepared_for_class[value] _lowerCAmelCase : int = tuple(snake_case__ ) # Send to model 
_lowerCAmelCase : Tuple = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def a ( self ): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase : List[str] = type self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Optional[int] = TFLayoutLMvaModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=snake_case__ ) if is_vision_available() else None @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) _lowerCAmelCase : List[Any] = self.default_image_processor _lowerCAmelCase : Optional[Any] = prepare_img() _lowerCAmelCase : Union[str, Any] = image_processor(images=snake_case__ , return_tensors='tf' ).pixel_values _lowerCAmelCase : Optional[Any] = tf.constant([[1, 2]] ) _lowerCAmelCase : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass 
_lowerCAmelCase : Any = model(input_ids=snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ ) # verify the logits _lowerCAmelCase : Dict = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , snake_case__ ) _lowerCAmelCase : Optional[int] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ) )
25
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
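A minimal, self-contained sketch of the rotary `permute` helper defined in this conversion script, run on toy dimensions; the sizes are invented for illustration and are not real model shapes. It regroups interleaved rotary pairs into the half-split layout the Hugging Face LLaMA implementation expects.

import torch

def permute(w, n_heads, dim1, dim2):
    # view -> (heads, pairs, 2, dim2), swap the pair axes, flatten back
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

n_heads, dim = 2, 8  # toy sizes
w = torch.arange(dim * dim, dtype=torch.float32).reshape(dim, dim)
print(permute(w, n_heads, dim, dim).shape)  # torch.Size([8, 8]); rows reordered per head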
25
1
'''simple docstring''' from __future__ import annotations def lowercase (_A , _A , _A ): """simple docstring""" if days_between_payments <= 0: raise ValueError('days_between_payments must be > 0' ) if daily_interest_rate < 0: raise ValueError('daily_interest_rate must be >= 0' ) if principal <= 0: raise ValueError('principal must be > 0' ) return principal * daily_interest_rate * days_between_payments def lowercase (_A , _A , _A , ): """simple docstring""" if number_of_compounding_periods <= 0: raise ValueError('number_of_compounding_periods must be > 0' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' ) if principal <= 0: raise ValueError('principal must be > 0' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def lowercase (_A , _A , _A , ): """simple docstring""" if number_of_years <= 0: raise ValueError('number_of_years must be > 0' ) if nominal_annual_percentage_rate < 0: raise ValueError('nominal_annual_percentage_rate must be >= 0' ) if principal <= 0: raise ValueError('principal must be > 0' ) return compound_interest( _A , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 ) if __name__ == "__main__": import doctest doctest.testmod()
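A worked numeric check of the three helpers above, assuming the intended names simple_interest, compound_interest and apr_interest that the parameter names suggest (the record's obfuscated signatures all read lowercase); the amounts are made up.

principal = 10_000.0
print(simple_interest(principal, 0.0005, 30))  # 10000 * 0.0005 * 30 = 150.0
print(compound_interest(principal, 0.05, 3))   # 10000 * (1.05**3 - 1) = 1576.25
print(apr_interest(principal, 0.05, 3))        # daily compounding over 3 * 365 periods, ~1618.2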
25
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = 1 __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None def a ( self ): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
25
1
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
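A small numeric sanity check of the element-wise definitions used above; it needs only NumPy and mirrors the union, intersection, algebraic-sum and bounded-sum formulas.

import numpy as np

a = np.array([0.2, 0.7, 1.0])
b = np.array([0.5, 0.3, 0.4])
assert np.allclose(np.maximum(a, b), [0.5, 0.7, 1.0])      # union = max(µA, µB)
assert np.allclose(np.minimum(a, b), [0.2, 0.3, 0.4])      # intersection = min(µA, µB)
assert np.allclose(a + b - a * b, [0.6, 0.79, 1.0])        # algebraic sum
assert np.allclose(np.minimum(1, a + b), [0.7, 1.0, 1.0])  # bounded sum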
25
'''simple docstring''' lowerCAmelCase : List[str] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCAmelCase : List[str] = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
25
1
'''simple docstring'''
import random


def random_graph(vertices_number, probability, directed=False):
    """simple docstring"""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number):
    """simple docstring"""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
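Hypothetical usage of the generator above: seeding the RNG makes the sparse graph reproducible, and probability >= 1 falls through to complete_graph.

import random

random.seed(1)
print(random_graph(4, 0.5))  # adjacency lists; exact edges depend on the seed
print(random_graph(4, 1))    # {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}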
25
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
25
1
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = torch.nn.Linear(10 , 10 ) _lowerCAmelCase : Any = torch.optim.SGD(model.parameters() , 0.1 ) _lowerCAmelCase : List[str] = Accelerator() _lowerCAmelCase : int = accelerator.prepare(snake_case__ ) try: pickle.loads(pickle.dumps(snake_case__ ) ) except Exception as e: self.fail(F'Accelerated optimizer pickling failed with {e}' ) AcceleratorState._reset_state()
25
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "nat" __magic_name__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Tuple = embed_dim _lowerCAmelCase : Any = depths _lowerCAmelCase : Dict = len(snake_case__ ) _lowerCAmelCase : str = num_heads _lowerCAmelCase : Dict = kernel_size _lowerCAmelCase : Union[str, Any] = mlp_ratio _lowerCAmelCase : int = qkv_bias _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = drop_path_rate _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Tuple = layer_norm_eps _lowerCAmelCase : Dict = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) _lowerCAmelCase : Any = layer_scale_init_value _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
25
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: lowerCAmelCase : Optional[Any] = None lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : List[Any] = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : Optional[Any] = { """google/bigbird-roberta-base""": 40_96, """google/bigbird-roberta-large""": 40_96, """google/bigbird-base-trivia-itc""": 40_96, } lowerCAmelCase : str = """▁""" class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = BigBirdTokenizer __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Dict = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token _lowerCAmelCase : int = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token _lowerCAmelCase : Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token _lowerCAmelCase : int = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token _lowerCAmelCase : List[str] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token super().__init__( snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Optional[Any] = vocab_file _lowerCAmelCase : Union[str, Any] = False if not self.vocab_file else True def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : List[str] = [self.sep_token_id] _lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : List[Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : List[str] = { """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""", """PegasusXForConditionalGeneration""", """PegasusXModel""", """PegasusXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
'''simple docstring''' lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag. lowerCAmelCase : Optional[int] = 1 # The second color of the flag. lowerCAmelCase : int = 2 # The third color of the flag. lowerCAmelCase : Any = (red, white, blue) def lowercase (_A ): """simple docstring""" if not sequence: return [] if len(_A ) == 1: return list(_A ) _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : List[str] = len(_A ) - 1 _lowerCAmelCase : Optional[Any] = 0 while mid <= high: if sequence[mid] == colors[0]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid] high -= 1 else: _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values' raise ValueError(_A ) return sequence if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip() lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")] print(F'''{dutch_national_flag_sort(unsorted)}''')
25
1
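The Dutch national flag sort in the sample above maintains three regions (below `low`: first color; above `high`: third color; `mid` scans the unknown middle), which makes it a single O(n) pass with O(1) extra space. A de-obfuscated reference version for comparison:

def dnf_sort(seq: list[int]) -> list[int]:
    # Invariant: seq[:low] are 0s, seq[low:mid] are 1s, seq[high+1:] are 2s.
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == 0:
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == 1:
            mid += 1
        else:  # seq[mid] == 2
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
    return seq

assert dnf_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]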
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=SCREAMING_SNAKE_CASE_ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) __magic_name__ = Features({"audio": Audio()} ) __magic_name__ = Features({"labels": ClassLabel} ) __magic_name__ = "audio" __magic_name__ = "labels" def a ( self , snake_case__ ): '''simple docstring''' if self.label_column not in features: raise ValueError(F'Column {self.label_column} is not present in features.' ) if not isinstance(features[self.label_column] , snake_case__ ): raise ValueError(F'Column {self.label_column} is not a ClassLabel.' ) _lowerCAmelCase : List[str] = copy.deepcopy(self ) _lowerCAmelCase : Tuple = self.label_schema.copy() _lowerCAmelCase : Any = features[self.label_column] _lowerCAmelCase : Union[str, Any] = label_schema return task_template @property def a ( self ): '''simple docstring''' return { self.audio_column: "audio", self.label_column: "labels", }
25
'''simple docstring''' def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1] _lowerCAmelCase : int = 6 _lowerCAmelCase : Dict = 1 _lowerCAmelCase : Optional[int] = 1_9_0_1 _lowerCAmelCase : Optional[Any] = 0 while year < 2_0_0_1: day += 7 if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] elif day > 2_9 and month == 2: month += 1 _lowerCAmelCase : List[str] = day - 2_9 else: if day > days_per_month[month - 1]: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] if month > 1_2: year += 1 _lowerCAmelCase : Optional[int] = 1 if year < 2_0_0_1 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
25
1
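The calendar-walking sample above can be cross-checked with the standard library, which sidesteps the hand-rolled leap-year bookkeeping entirely: count how many first-of-month dates in 1901-2000 fall on a Sunday (`weekday() == 6`).

from datetime import date

def sundays_on_first() -> int:
    # Walk every month of the twentieth century (1901-2000 inclusive).
    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if date(year, month, 1).weekday() == 6
    )

assert sundays_on_first() == 171  # the accepted Project Euler 19 answer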
'''simple docstring''' import math import sys def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Tuple = '' try: with open(_A , 'rb' ) as binary_file: _lowerCAmelCase : Dict = binary_file.read() for dat in data: _lowerCAmelCase : Any = f'{dat:08b}' result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[str] = {'0': '0', '1': '1'} _lowerCAmelCase , _lowerCAmelCase : Optional[int] = '', '' _lowerCAmelCase : Optional[Any] = len(_A ) for i in range(len(_A ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue _lowerCAmelCase : Any = lexicon[curr_string] result += last_match_id _lowerCAmelCase : Dict = last_match_id + '0' if math.loga(_A ).is_integer(): _lowerCAmelCase : str = {} for curr_key in list(_A ): _lowerCAmelCase : Any = lexicon.pop(_A ) _lowerCAmelCase : Tuple = new_lex _lowerCAmelCase : List[str] = last_match_id + '1' index += 1 _lowerCAmelCase : Dict = '' return result def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = 8 try: with open(_A , 'wb' ) as opened_file: _lowerCAmelCase : Tuple = [ to_write[i : i + byte_length] for i in range(0 , len(_A ) , _A ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_A , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = 0 for letter in data_bits: if letter == "1": break counter += 1 _lowerCAmelCase : Optional[Any] = data_bits[counter:] _lowerCAmelCase : str = data_bits[counter + 1 :] return data_bits def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : List[str] = read_file_binary(_A ) _lowerCAmelCase : List[str] = remove_prefix(_A ) _lowerCAmelCase : str = decompress_data(_A ) write_file_binary(_A , _A ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
25
'''simple docstring''' def lowercase (_A = 1_0_0_0_0_0_0 ): """simple docstring""" _lowerCAmelCase : Any = set(range(3 , _A , 2 ) ) primes.add(2 ) for p in range(3 , _A , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , _A , _A ) ) ) _lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )] for p in primes: for n in range(_A , limit + 1 , _A ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
25
1
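The totient-sum sample above computes the sum of phi(n) for 2 <= n <= limit, i.e. the number of reduced proper fractions with denominator at most `limit`. A compact sieve restatement of the same idea, using integer arithmetic only:

def totient_sum(limit: int) -> int:
    phi = list(range(limit + 1))           # phi[n] initialised to n
    for p in range(2, limit + 1):
        if phi[p] == p:                    # p is prime: still untouched
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p      # scale by (1 - 1/p); the division is exact here
    return sum(phi[2:])

assert totient_sum(8) == 21  # phi(2..8) = 1+2+2+4+2+6+4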
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : Optional[Any] = BlipImageProcessor() _lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' ) _lowerCAmelCase : str = BlipProcessor(snake_case__ , snake_case__ ) processor.save_pretrained(self.tmpdirname ) def a ( self , **snake_case__ ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer def a ( self , **snake_case__ ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor def a ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _lowerCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) _lowerCAmelCase : Optional[Any] = BlipProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.get_image_processor() _lowerCAmelCase : Tuple = self.get_tokenizer() _lowerCAmelCase : Any = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Dict = self.prepare_image_inputs() _lowerCAmelCase : Tuple = image_processor(snake_case__ , return_tensors='np' ) _lowerCAmelCase : Union[str, Any] = processor(images=snake_case__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.get_image_processor() _lowerCAmelCase : Optional[int] = self.get_tokenizer() _lowerCAmelCase : Optional[int] = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Any = 'lower newer' _lowerCAmelCase : str = processor(text=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer(snake_case__ , return_token_type_ids=snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.get_image_processor() 
_lowerCAmelCase : Optional[Any] = self.get_tokenizer() _lowerCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Optional[Any] = 'lower newer' _lowerCAmelCase : Dict = self.prepare_image_inputs() _lowerCAmelCase : Dict = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.get_image_processor() _lowerCAmelCase : str = self.get_tokenizer() _lowerCAmelCase : int = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase : Tuple = processor.batch_decode(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.get_image_processor() _lowerCAmelCase : List[str] = self.get_tokenizer() _lowerCAmelCase : Any = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Any = 'lower newer' _lowerCAmelCase : str = self.prepare_image_inputs() _lowerCAmelCase : Any = processor(text=snake_case__ , images=snake_case__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
25
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Split the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
25
1
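The ordering rule at the heart of the init-sorting script above is easy to state in isolation: constants (ALL_CAPS) first, then classes (leading capital), then functions, each group sorted case-insensitively with underscores ignored. A standalone sketch, with illustrative object names:

def sort_objects(objects: list[str]) -> list[str]:
    def key(name: str) -> str:
        return name.lower().replace("_", "")     # ignore case and underscores

    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)

assert sort_objects(["load_tool", "Agent", "TOOL_MAPPING", "launch"]) == [
    "TOOL_MAPPING", "Agent", "launch", "load_tool",
]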
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowerCAmelCase : str = random.Random() if is_torch_available(): import torch def lowercase (_A , _A=1.0 , _A=None , _A=None ): """simple docstring""" if rng is None: _lowerCAmelCase : List[Any] = global_rng _lowerCAmelCase : int = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=7 , snake_case__=400 , snake_case__=2000 , snake_case__=1 , snake_case__=0.0 , snake_case__=1_6000 , snake_case__=True , snake_case__=True , ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : Tuple = batch_size _lowerCAmelCase : str = min_seq_length _lowerCAmelCase : Any = max_seq_length _lowerCAmelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _lowerCAmelCase : Tuple = feature_size _lowerCAmelCase : Union[str, Any] = padding_value _lowerCAmelCase : Dict = sampling_rate _lowerCAmelCase : Dict = return_attention_mask _lowerCAmelCase : str = do_normalize def a ( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a ( self , snake_case__=False , snake_case__=False ): '''simple docstring''' def _flatten(snake_case__ ): return list(itertools.chain(*snake_case__ ) ) if equal_length: _lowerCAmelCase : Any = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size _lowerCAmelCase : Any = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _lowerCAmelCase : str = [np.asarray(snake_case__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ASTFeatureExtractor def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ASTFeatureExtractionTester(self ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase : Optional[int] = [np.asarray(snake_case__ ) for speech_input in speech_inputs] # Test not batched input _lowerCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values _lowerCAmelCase : List[str] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) ) # Test batched _lowerCAmelCase : str = feat_extract(snake_case__ , padding=snake_case__ , return_tensors='np' ).input_values _lowerCAmelCase : List[Any] = feat_extract(snake_case__ , padding=snake_case__ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ): 
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. _lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)] _lowerCAmelCase : Any = np.asarray(snake_case__ ) _lowerCAmelCase : Optional[Any] = feat_extract(snake_case__ , return_tensors='np' ).input_values _lowerCAmelCase : str = feat_extract(snake_case__ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ): self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) ) @require_torch def a ( self ): '''simple docstring''' import torch _lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowerCAmelCase : str = np.random.rand(100 ).astype(np.floataa ) _lowerCAmelCase : Optional[int] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _lowerCAmelCase : Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) _lowerCAmelCase : Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a ( self , snake_case__ ): '''simple docstring''' from datasets import load_dataset _lowerCAmelCase : Any = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech _lowerCAmelCase : Union[str, Any] = ds.sort('id' ).select(range(snake_case__ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on _lowerCAmelCase : List[Any] = self._load_datasamples(1 ) _lowerCAmelCase : Dict = ASTFeatureExtractor() _lowerCAmelCase : str = feature_extractor(snake_case__ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , snake_case__ , atol=1E-4 ) )
25
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
'''simple docstring''' from __future__ import annotations from typing import Any def lowercase (_A ): """simple docstring""" if not postfix_notation: return 0 _lowerCAmelCase : int = {'+', '-', '*', '/'} _lowerCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: _lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_A ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
25
1
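The postfix evaluator sample above relies on pop order (the right operand comes off the stack first) and on division that truncates toward zero. A named reference version with the same semantics:

def eval_postfix(tokens: list[str]) -> int:
    stack: list[int] = []
    for token in tokens:
        if token in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()   # right operand is popped first
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                q = a // b                    # floor division...
                if a % b != 0 and (a < 0) != (b < 0):
                    q += 1                    # ...corrected to truncate toward zero
                stack.append(q)
        else:
            stack.append(int(token))
    return stack.pop()

assert eval_postfix(["3", "4", "+", "2", "*"]) == 14  # (3 + 4) * 2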
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Optional[Any] = { """vocab_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt""" ), """google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""", """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json""" ), """google/electra-base-generator""": ( """https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json""" ), """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[Any] = { """google/electra-small-generator""": 5_12, """google/electra-base-generator""": 5_12, """google/electra-large-generator""": 5_12, """google/electra-small-discriminator""": 5_12, """google/electra-base-discriminator""": 5_12, """google/electra-large-discriminator""": 5_12, } lowerCAmelCase : Dict = { """google/electra-small-generator""": {"""do_lower_case""": True}, """google/electra-base-generator""": {"""do_lower_case""": True}, """google/electra-large-generator""": {"""do_lower_case""": True}, """google/electra-small-discriminator""": {"""do_lower_case""": True}, """google/electra-base-discriminator""": {"""do_lower_case""": True}, """google/electra-large-discriminator""": {"""do_lower_case""": True}, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_INIT_CONFIGURATION __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ElectraTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__=True , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , 
tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars ): _lowerCAmelCase : Tuple = getattr(snake_case__ , normalizer_state.pop('type' ) ) _lowerCAmelCase : List[Any] = do_lower_case _lowerCAmelCase : List[Any] = strip_accents _lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars _lowerCAmelCase : Optional[int] = normalizer_class(**snake_case__ ) _lowerCAmelCase : List[Any] = do_lower_case def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : List[Any] = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Any = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ )
25
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mobilenet_v2" def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Union[str, Any] = image_size _lowerCAmelCase : List[Any] = depth_multiplier _lowerCAmelCase : List[Any] = depth_divisible_by _lowerCAmelCase : Optional[Any] = min_depth _lowerCAmelCase : str = expand_ratio _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = first_layer_is_expansion _lowerCAmelCase : int = finegrained_output _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = tf_padding _lowerCAmelCase : Optional[int] = classifier_dropout_prob _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : str = semantic_loss_ignore_index class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = version.parse("1.11" ) @property def a ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' return 1E-4
25
1
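The segment-id logic in the Electra tokenizer above ("[CLS] A [SEP]" gets type 0, "B [SEP]" gets type 1) can be shown on plain lists. The cls/sep ids below are illustrative BERT-style defaults, not values read from any real vocabulary:

def token_type_ids(ids_a: list[int], ids_b: list[int] | None = None,
                   cls_id: int = 101, sep_id: int = 102) -> list[int]:
    first = [cls_id] + ids_a + [sep_id]                # "[CLS] A [SEP]" -> all zeros
    if ids_b is None:
        return [0] * len(first)
    return [0] * len(first) + [1] * (len(ids_b) + 1)   # "B [SEP]" -> all ones

assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]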
'''simple docstring''' from __future__ import annotations import math def lowercase (_A , _A , _A , _A , _A ): """simple docstring""" if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(_A ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , _A , _A , _A ) , minimax(depth + 1 , node_index * 2 + 1 , _A , _A , _A ) , ) return min( minimax(depth + 1 , node_index * 2 , _A , _A , _A ) , minimax(depth + 1 , node_index * 2 + 1 , _A , _A , _A ) , ) def lowercase (): """simple docstring""" _lowerCAmelCase : List[Any] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3] _lowerCAmelCase : Optional[int] = math.log(len(_A ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , _A , _A , _A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
25
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
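The minimax sample above walks a perfect binary tree whose leaves sit at depth log2(len(scores)), alternating maximising and minimising levels. A de-obfuscated restatement that reproduces its result on the same scores:

import math

def minimax(depth: int, node: int, is_max: bool, scores: list[int], height: int) -> int:
    if depth == height:                       # reached a leaf
        return scores[node]
    pick = max if is_max else min
    return pick(
        minimax(depth + 1, node * 2, not is_max, scores, height),
        minimax(depth + 1, node * 2 + 1, not is_max, scores, height),
    )

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = int(math.log2(len(scores)))
assert minimax(0, 0, True, scores, height) == 65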
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase : int = { """configuration_swiftformer""": [ """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwiftFormerConfig""", """SwiftFormerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[Any] = [ """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwiftFormerForImageClassification""", """SwiftFormerModel""", """SwiftFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
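# --- Added usage sketch (not part of the upstream file) ---------------------
# A minimal sketch of driving the fast NLLB tokenizer above for translation,
# assuming it corresponds to transformers' NllbTokenizerFast and that the
# public "facebook/nllb-200-distilled-600M" checkpoint is reachable; the
# language codes come from the FAIRSEQ_LANGUAGE_CODES list defined above.
#
#   from transformers import NllbTokenizerFast
#
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")
#   # set_src_lang_special_tokens places the eng_Latn code at the start of
#   # input_ids (or after </s> when legacy_behaviour=True).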
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = ["image_processor", "tokenizer"] __magic_name__ = "LayoutLMv2ImageProcessor" __magic_name__ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , snake_case__ , ) _lowerCAmelCase : Union[str, Any] = kwargs.pop('feature_extractor' ) _lowerCAmelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(snake_case__ , snake_case__ ) def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor _lowerCAmelCase : Union[str, Any] = self.image_processor(images=snake_case__ , return_tensors=snake_case__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Any = [text] # add batch dimension (as the image processor always adds a batch dimension) _lowerCAmelCase : Dict = features['words'] _lowerCAmelCase : str = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # add pixel values _lowerCAmelCase : Optional[Any] = features.pop('pixel_values' ) if return_overflowing_tokens is True: _lowerCAmelCase : Union[str, Any] = self.get_overflowing_images(snake_case__ , encoded_inputs['overflow_to_sample_mapping'] ) _lowerCAmelCase : int = images return encoded_inputs def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(snake_case__ ) != len(snake_case__ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F' {len(snake_case__ )} and {len(snake_case__ )}' ) return images_with_overflow def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def a ( self ): '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def a ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case__ , ) return self.image_processor_class @property def a ( self ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case__ , ) return self.image_processor
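# --- Added usage sketch (not part of the upstream file) ---------------------
# Feeding the processor above pre-extracted words and boxes, assuming it
# corresponds to transformers' LayoutXLMProcessor; with apply_ocr=False the
# built-in OCR is skipped and the caller supplies words and normalized boxes.
#
#   from PIL import Image
#   from transformers import (LayoutLMv2ImageProcessor, LayoutXLMProcessor,
#                             LayoutXLMTokenizerFast)
#
#   image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
#   tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
#   processor = LayoutXLMProcessor(image_processor, tokenizer)
#
#   image = Image.new("RGB", (224, 224), "white")  # stand-in document image
#   words = ["hello", "world"]
#   boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
#   encoding = processor(image, words, boxes=boxes, return_tensors="pt")
#   # keys: input_ids, attention_mask, bbox, image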
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup lowerCAmelCase : List[str] = [ """kernels/rwkv/wkv_cuda.cu""", """kernels/rwkv/wkv_op.cpp""", """kernels/deformable_detr/ms_deform_attn.h""", """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""", """models/graphormer/algos_graphormer.pyx""", ] def lowercase (_A ): """simple docstring""" for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""") lowerCAmelCase : Dict = parser.parse_args() if args.check_lib: lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""") lowerCAmelCase : int = Path(transformers_module.__file__).parent else: lowerCAmelCase : int = Path.cwd() / """build/lib/transformers""" if not test_custom_files_are_present(transformers_path): raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
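# --- Added usage note (not part of the upstream script) ---------------------
# How the release check above is typically invoked from the repository root;
# the script path is an assumption, the flags come from the argparse setup:
#
#   python utils/check_build.py              # checks build/lib/transformers
#   python utils/check_build.py --check_lib  # checks the installed package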
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 5_00_03 lowerCAmelCase : Optional[Any] = 5_00_02 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = PLBartTokenizer __magic_name__ = None __magic_name__ = False def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : Optional[int] = PLBartTokenizer(snake_case__ , language_codes='base' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = PLBartTokenizer(snake_case__ , language_codes='base' , keep_accents=snake_case__ ) _lowerCAmelCase : Dict = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) _lowerCAmelCase : str = tokenizer.vocab_size _lowerCAmelCase : Tuple = [tokenizer.convert_ids_to_tokens(snake_case__ ) for x in range(end - 4 , snake_case__ )] self.assertListEqual(snake_case__ , ['__java__', '__python__', '__en_XX__', '<mask>'] ) _lowerCAmelCase : Optional[int] = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go' _lowerCAmelCase : List[Any] = tokenizer(snake_case__ ).input_ids self.assertEqual( tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) , snake_case__ , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = PLBartTokenizer(snake_case__ , language_codes='multi' , keep_accents=snake_case__ ) _lowerCAmelCase : List[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value 
+ tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) _lowerCAmelCase : Any = tokenizer.vocab_size _lowerCAmelCase : int = [tokenizer.convert_ids_to_tokens(snake_case__ ) for x in range(end - 7 , snake_case__ )] self.assertListEqual( snake_case__ , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] ) _lowerCAmelCase : List[str] = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go' _lowerCAmelCase : str = tokenizer(snake_case__ ).input_ids self.assertEqual( tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) , snake_case__ , ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "uclanlp/plbart-python-en_XX" __magic_name__ = [ "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])", "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])", ] __magic_name__ = [ "Returns the maximum value of a b c.", "Sums the values of a b c.", ] __magic_name__ = [ 1_3_4, 5_4_5_2, 3_3_4_6_0, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 9_8_8, 2_0, 3_3_4_5_6, 1_9, 3_3_4_5_6, 7_7_1, 3_9, 4_2_5_8, 8_8_9, 3_3_1_8, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 2_4_7_1, 2, PYTHON_CODE, ] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' ) _lowerCAmelCase : Optional[int] = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0003 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2] _lowerCAmelCase : Optional[Any] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , 
snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20] self.assertIsInstance(src_text[0] , snake_case__ ) _lowerCAmelCase : int = 10 _lowerCAmelCase : str = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , snake_case__ ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0004, 5_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : str = PLBartTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Union[str, Any] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , snake_case__ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : List[str] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) _lowerCAmelCase : Tuple = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : int = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : Optional[Any] = targets['input_ids'] _lowerCAmelCase : Optional[int] = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' ) self.assertEqual( nested_simplify(snake_case__ ) , { # A, test, EOS, en_XX 'input_ids': [[150, 242, 2, 5_0003]], 'attention_mask': [[1, 1, 1, 1]], # java 'forced_bos_token_id': 5_0001, } , )
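# --- Added usage sketch (not part of the upstream tests) --------------------
# The encoding behavior the integration tests above pin down, assuming the
# public "uclanlp/plbart-python-en_XX" checkpoint:
#
#   from transformers import PLBartTokenizer
#
#   tok = PLBartTokenizer.from_pretrained(
#       "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#   )
#   ids = tok("def add(a,b):NEW_LINE_INDENTreturn a+b").input_ids
#   # ids ends with [eos, __python__]: PLBart appends the source language
#   # code after </s>, mirroring the expected_src_tokens fixture above.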
'''simple docstring'''


def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : Union[str, Any] = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    _lowerCAmelCase : List[str] = ''
    _lowerCAmelCase : Any = ''

    # append each character followed by "|" for all but the last character
    for i in input_string[: len(_A ) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # we store the start and end of the previous furthest-reaching palindromic
    # substring
    _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0

    # length[i] is the length of the palindromic substring centered at i
    _lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]

    # for each center in new_input_string, find the corresponding palindrome
    _lowerCAmelCase : Any = 0
    for j in range(len(_A ) ):
        _lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(_A )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        _lowerCAmelCase : List[str] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to cover it
        if j + k - 1 > r:
            _lowerCAmelCase : Optional[Any] = j - k + 1  # noqa: E741
            _lowerCAmelCase : int = j + k - 1

        # update max_length and the start position
        if max_length < length[j]:
            _lowerCAmelCase : Dict = length[j]
            _lowerCAmelCase : Optional[int] = j

    # extract the palindrome from the interleaved string, dropping the "|" separators
    _lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
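# Expected behavior of the routine above (doctest-style): the "|"-interleaved
# string lets a single Manacher-style scan handle both even- and odd-length
# centers, so the longest palindromic substring is found in O(n) rather than
# the naive O(n^2) center expansion.
#
#   >>> lowercase("abbbaba")
#   'abbba'
#   >>> lowercase("ababa")
#   'ababa'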
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
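# --- Added usage sketch (not part of the upstream tests) --------------------
# The pattern the tests above exercise, in isolation: kwargs handlers are
# plain dataclasses whose fields Accelerator forwards to the objects it
# constructs (the DDP wrapper, the grad scaler, ...).
#
#   import torch
#   from accelerate import Accelerator, DistributedDataParallelKwargs
#
#   handler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
#   accelerator = Accelerator(kwargs_handlers=[handler])
#   model = accelerator.prepare(torch.nn.Linear(100, 200))
#   # under a multi-GPU torchrun launch, `model` is a DDP wrapper built with
#   # exactly those kwargs, as asserted in the __main__ block above.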
'''simple docstring''' def lowercase (_A = 1_0_0_0 ): """simple docstring""" _lowerCAmelCase : Any = 2**power _lowerCAmelCase : str = str(_A ) _lowerCAmelCase : str = list(_A ) _lowerCAmelCase : Tuple = 0 for i in list_num: sum_of_num += int(_A ) return sum_of_num if __name__ == "__main__": lowerCAmelCase : List[str] = int(input("""Enter the power of 2: """).strip()) print("""2 ^ """, power, """ = """, 2**power) lowerCAmelCase : Optional[int] = solution(power) print("""Sum of the digits is: """, result)
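# Expected behavior of the routine above (doctest-style): it sums the decimal
# digits of 2**power; 2**15 = 32768 gives 3 + 2 + 7 + 6 + 8 = 26, and the
# default power of 1000 is Project Euler problem 16.
#
#   >>> lowercase(15)
#   26
#   >>> lowercase(6)
#   10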
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "trajectory_transformer" __magic_name__ = ["past_key_values"] __magic_name__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = action_weight _lowerCAmelCase : Optional[int] = reward_weight _lowerCAmelCase : Union[str, Any] = value_weight _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Tuple = block_size _lowerCAmelCase : List[Any] = action_dim _lowerCAmelCase : List[Any] = observation_dim _lowerCAmelCase : Union[str, Any] = transition_dim _lowerCAmelCase : Tuple = learning_rate _lowerCAmelCase : int = n_layer _lowerCAmelCase : Any = n_head _lowerCAmelCase : Tuple = n_embd _lowerCAmelCase : Optional[Any] = embd_pdrop _lowerCAmelCase : Union[str, Any] = attn_pdrop _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range _lowerCAmelCase : List[Any] = use_cache super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
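# --- Added usage sketch (not part of the upstream file) ---------------------
# Instantiating the config above, assuming it corresponds to transformers'
# TrajectoryTransformerConfig (since relocated under models.deprecated, as
# the four-dot relative imports suggest):
#
#   from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel
#
#   config = TrajectoryTransformerConfig(n_layer=4, n_head=4)
#   model = TrajectoryTransformerModel(config)  # randomly initialized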
'''simple docstring''' from string import ascii_lowercase, ascii_uppercase def lowercase (_A ): """simple docstring""" if not sentence: return "" _lowerCAmelCase : str = dict(zip(_A , _A ) ) return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
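# Expected behavior of the routine above (doctest-style): only the first
# character is upper-cased, via the ascii_lowercase -> ascii_uppercase
# mapping, and a non-letter leading character passes through unchanged.
#
#   >>> lowercase("hello world")
#   'Hello world'
#   >>> lowercase("123 hello world")
#   '123 hello world'
#   >>> lowercase("")
#   ''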
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
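# --- Added usage sketch (not part of the upstream tests) --------------------
# The translation setup the integration tests above encode, assuming the
# public "facebook/mbart-large-50-one-to-many-mmt" checkpoint:
#
#   from transformers import MBart50Tokenizer
#
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok(" UN Chief Says There Is No Military Solution in Syria",
#               return_tensors="pt")
#   # for MBart-50 the language code comes first: input_ids start with en_XX
#   # and end with </s>, matching the prefix/suffix assertions above.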
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Dict = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "ibert" def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=False , snake_case__="none" , **snake_case__ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[int] = vocab_size _lowerCAmelCase : Union[str, Any] = hidden_size _lowerCAmelCase : Optional[Any] = num_hidden_layers _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[Any] = intermediate_size _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : int = attention_probs_dropout_prob _lowerCAmelCase : str = max_position_embeddings _lowerCAmelCase : List[str] = type_vocab_size _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : str = layer_norm_eps _lowerCAmelCase : Union[str, Any] = position_embedding_type _lowerCAmelCase : Tuple = quant_mode _lowerCAmelCase : int = force_dequant class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def a ( self ): '''simple docstring''' if self.task == "multiple-choice": _lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
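# --- Added usage sketch (not part of the upstream file) ---------------------
# Building the config above with quantization turned on, assuming it matches
# transformers' IBertConfig:
#
#   from transformers import IBertConfig, IBertModel
#
#   config = IBertConfig(quant_mode=True)  # integer-only inference mode
#   model = IBertModel(config)             # randomly initialized
#   # force_dequant="none" (the default) keeps all quantized paths active.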
'''simple docstring''' from math import isqrt def lowercase (_A ): """simple docstring""" return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) ) def lowercase (_A = 1_0**6 ): """simple docstring""" _lowerCAmelCase : str = 0 _lowerCAmelCase : str = 1 _lowerCAmelCase : List[str] = 7 while prime_candidate < max_prime: primes_count += is_prime(_A ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
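# Expected behavior of the routines above: the candidates 7, 19, 37, 61, 91,
# ... are the differences of consecutive cubes, (k + 1)**3 - k**3
# = 3*k**2 + 3*k + 1 (7 = 2**3 - 1**3, 19 = 3**3 - 2**3, 37 = 4**3 - 3**3),
# and the solution counts how many of them below the bound are prime. Below
# 100 there are four: 7, 19, 37, and 61 (91 = 7 * 13 fails); the default
# bound of 10**6 is Project Euler problem 131.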
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCAmelCase : Tuple = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(*snake_case__ , **snake_case__ ) requires_backends(self , 'vision' ) self.check_model_type(snake_case__ ) def __call__( self , snake_case__ , **snake_case__ ): '''simple docstring''' return super().__call__(snake_case__ , **snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' return {}, {}, {} def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = load_image(snake_case__ ) _lowerCAmelCase : Optional[int] = image.size _lowerCAmelCase : Union[str, Any] = self.image_processor(images=snake_case__ , return_tensors=self.framework ) return model_inputs def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.model(**snake_case__ ) return model_outputs def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = model_outputs.predicted_depth _lowerCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=snake_case__ ) _lowerCAmelCase : Dict = prediction.squeeze().cpu().numpy() _lowerCAmelCase : Union[str, Any] = (output * 255 / np.max(snake_case__ )).astype('uint8' ) _lowerCAmelCase : Union[str, Any] = Image.fromarray(snake_case__ ) _lowerCAmelCase : List[str] = {} _lowerCAmelCase : Tuple = predicted_depth _lowerCAmelCase : Union[str, Any] = depth return output_dict
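# --- Added usage sketch (not part of the upstream file) ---------------------
# Running the pipeline above end to end, assuming it backs transformers'
# "depth-estimation" pipeline task and the public "Intel/dpt-large" checkpoint:
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL image, rescaled to 0-255 for viewing
#   result["predicted_depth"]  # raw depth tensor from the model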
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mvp" __magic_name__ = ["past_key_values"] __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Optional[Any] = d_model _lowerCAmelCase : Optional[int] = encoder_ffn_dim _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = encoder_attention_heads _lowerCAmelCase : Any = decoder_ffn_dim _lowerCAmelCase : Optional[Any] = decoder_layers _lowerCAmelCase : int = decoder_attention_heads _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : List[Any] = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Optional[Any] = activation_function _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = encoder_layerdrop _lowerCAmelCase : Union[str, Any] = decoder_layerdrop _lowerCAmelCase : Optional[int] = classifier_dropout _lowerCAmelCase : List[Any] = use_cache _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase : Optional[Any] = use_prompt _lowerCAmelCase : Optional[Any] = prompt_length _lowerCAmelCase : Any = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ): _lowerCAmelCase : Any = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
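# --- Added usage sketch (not part of the upstream file) ---------------------
# Instantiating the config above with lightweight prompt tuning enabled,
# assuming it matches transformers' MvpConfig:
#
#   from transformers import MvpConfig, MvpForConditionalGeneration
#
#   config = MvpConfig(use_prompt=True, prompt_length=100)
#   model = MvpForConditionalGeneration(config)  # randomly initialized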
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[str] = [] if isinstance(_A , _A ): for v in tree.values(): shapes.extend(_fetch_dims(_A ) ) elif isinstance(_A , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(_A ) ) elif isinstance(_A , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('Not supported' ) return shapes @torch.jit.ignore def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = [] for d in reversed(_A ): idx.append(flat_idx % d ) _lowerCAmelCase : Optional[Any] = flat_idx // d return tuple(reversed(_A ) ) @torch.jit.ignore def lowercase (_A , _A , _A , _A = None , _A = None , ): """simple docstring""" def reduce_edge_list(_A ) -> None: _lowerCAmelCase : Optional[Any] = True for i in range(len(_A ) ): _lowerCAmelCase : Union[str, Any] = -1 * (i + 1) l[reversed_idx] &= tally _lowerCAmelCase : Union[str, Any] = l[reversed_idx] if start_edges is None: _lowerCAmelCase : int = [s == 0 for s in start] reduce_edge_list(_A ) if end_edges is None: _lowerCAmelCase : Union[str, Any] = [e == (d - 1) for e, d in zip(_A , _A )] reduce_edge_list(_A ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(_A ) == 0: return [()] elif len(_A ) == 1: return [(slice(start[0] , end[0] + 1 ),)] _lowerCAmelCase : List[Tuple[slice, ...]] = [] _lowerCAmelCase : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(_A , _A ): if s == e: path_list.append(slice(_A , s + 1 ) ) else: break _lowerCAmelCase : Tuple[slice, ...] 
= tuple(_A ) _lowerCAmelCase : List[Any] = len(_A ) # start == end, and we're done if divergence_idx == len(_A ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _lowerCAmelCase : Tuple = start[divergence_idx] return tuple( path + (slice(_A , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _lowerCAmelCase : Tuple = end[divergence_idx] return tuple( path + (slice(_A , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) _lowerCAmelCase : Tuple = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowercase (_A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : Any = t.shape[:no_batch_dims] _lowerCAmelCase : Dict = list(_flat_idx_to_idx(_A , _A ) ) # _get_minimal_slice_set is inclusive _lowerCAmelCase : Any = list(_flat_idx_to_idx(flat_end - 1 , _A ) ) # Get an ordered list of slices to perform _lowerCAmelCase : str = _get_minimal_slice_set( _A , _A , _A , ) _lowerCAmelCase : str = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowercase (_A , _A , _A , _A , _A = False , _A = None , _A = False , ): """simple docstring""" if not (len(_A ) > 0): raise ValueError('Must provide at least one input' ) _lowerCAmelCase : Dict = [shape[:no_batch_dims] for shape in _fetch_dims(_A )] _lowerCAmelCase : List[Any] = tuple([max(_A ) for s in zip(*_A )] ) def _prep_inputs(_A ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: _lowerCAmelCase : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) _lowerCAmelCase : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: _lowerCAmelCase : List[str] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t _lowerCAmelCase : Dict[str, Any] = tensor_tree_map(_prep_inputs , _A ) _lowerCAmelCase : Any = None if _out is not None: _lowerCAmelCase : Union[str, Any] = tensor_tree_map(lambda _A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) _lowerCAmelCase : Any = 1 for d in orig_batch_dims: flat_batch_dim *= d _lowerCAmelCase : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(_A ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t _lowerCAmelCase : Any = 0 _lowerCAmelCase : Optional[int] = prepped_outputs for _ in range(_A ): # Chunk the input if not low_mem: _lowerCAmelCase : Union[str, Any] = _select_chunk else: _lowerCAmelCase : str = partial( _chunk_slice , flat_start=_A , flat_end=min(_A , i + chunk_size ) , no_batch_dims=len(_A ) , ) _lowerCAmelCase : Dict[str, Any] = tensor_tree_map(_A , _A ) # Run the layer on the chunk _lowerCAmelCase : Optional[int] = layer(**_A ) # Allocate space for the output if out is None: _lowerCAmelCase : List[Any] = tensor_tree_map(lambda _A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _A ) # Put the chunk in its pre-allocated space if isinstance(_A , _A ): def assign(_A , _A ) -> None: for k, v in da.items(): if isinstance(_A , _A ): assign(_A , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: _lowerCAmelCase : Any = da[k] assign(_A , _A ) elif isinstance(_A , _A ): for xa, xa in zip(_A , _A ): if _add_into_out: xa[i : i + chunk_size] += xa else: _lowerCAmelCase : Optional[int] = xa elif isinstance(_A , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: _lowerCAmelCase : int = output_chunk else: raise ValueError('Not supported' ) i += chunk_size _lowerCAmelCase : List[Any] = tensor_tree_map(lambda _A : t.view(orig_batch_dims + t.shape[1:] ) , _A ) return out class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ = 512 , ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = max_chunk_size 
_lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Optional[tuple] = None def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' logging.info('Tuning chunk size...' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size _lowerCAmelCase : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] _lowerCAmelCase : str = [c for c in candidates if c > min_chunk_size] _lowerCAmelCase : Optional[Any] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(snake_case__ ) -> bool: try: with torch.no_grad(): fn(*snake_case__ , chunk_size=snake_case__ ) return True except RuntimeError: return False _lowerCAmelCase : int = 0 _lowerCAmelCase : int = len(snake_case__ ) - 1 while i > min_viable_chunk_size_index: _lowerCAmelCase : Optional[Any] = test_chunk_size(candidates[i] ) if not viable: _lowerCAmelCase : Optional[int] = (min_viable_chunk_size_index + i) // 2 else: _lowerCAmelCase : List[str] = i _lowerCAmelCase : Optional[Any] = (i + len(snake_case__ ) - 1) // 2 return candidates[min_viable_chunk_size_index] def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = True for aa, aa in zip(snake_case__ , snake_case__ ): assert type(snake_case__ ) == type(snake_case__ ) if isinstance(snake_case__ , (list, tuple) ): consistent &= self._compare_arg_caches(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case__ : x[0] )] _lowerCAmelCase : Optional[int] = [v for _, v in sorted(aa.items() , key=lambda snake_case__ : x[0] )] consistent &= self._compare_arg_caches(snake_case__ , snake_case__ ) else: consistent &= aa == aa return consistent def a ( self , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Dict = True _lowerCAmelCase : tuple = tree_map(lambda snake_case__ : a.shape if isinstance(snake_case__ , torch.Tensor ) else a , snake_case__ , snake_case__ ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(snake_case__ ) _lowerCAmelCase : Any = self._compare_arg_caches(self.cached_arg_data , snake_case__ ) else: # Otherwise, we can reuse the precomputed value _lowerCAmelCase : str = False if not consistent: _lowerCAmelCase : int = self._determine_favorable_chunk_size( snake_case__ , snake_case__ , snake_case__ , ) _lowerCAmelCase : List[str] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
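# Hedged usage sketch for the last top-level function above (upstream it is
# called `chunk_layer`; that restored name is assumed here, since the dump
# anonymizes it). It runs a layer over flattened batch dimensions in
# fixed-size chunks to bound peak memory:
import torch

def double(x: torch.Tensor) -> torch.Tensor:
    return 2 * x  # stand-in for an expensive layer

inputs = {"x": torch.randn(8, 64, 16)}  # two leading batch dims: (8, 64)
out = chunk_layer(double, inputs, chunk_size=32, no_batch_dims=2)
assert out.shape == (8, 64, 16)  # same values as double(**inputs), smaller peak memory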
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
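# Illustrative invocation (paths are placeholders; the script expects
# `tokenizer.model` under --input_dir and a `7B/` folder holding `params.json`
# and the `consolidated.*.pth` shards, as read by the functions above):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama \
#       --model_size 7B \
#       --output_dir /path/to/llama-7b-hf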
'''simple docstring'''
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Extracts the hidden states of a base transformer as features."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
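# Usage sketch for the pipeline above (upstream it is registered as
# "feature-extraction"; the checkpoint name is just an example and is
# downloaded on first use):
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test.")
# `features` is a nested list shaped [batch, num_tokens, hidden_size].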
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    """Configuration for how files should be downloaded and extracted."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        # Deep-copy every field so the copy is fully independent.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
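# Illustrative use of the config above: tweak a couple of fields, then take a
# deep copy that can be mutated independently.
config = DownloadConfig(max_retries=3, use_etag=False)
backup = config.copy()
assert backup.max_retries == 3 and backup is not config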
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Tuple = { """vocab_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""", }, """merges_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""", }, """tokenizer_file""": { """Salesforce/codegen-350M-mono""": ( """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : int = { """Salesforce/codegen-350M-mono""": 20_48, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = CodeGenTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__=False , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , ) if kwargs.pop('add_bos_token' , snake_case__ ): _lowerCAmelCase : int = kwargs.pop('name_or_path' , '' ) raise ValueError( 'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n' F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n' 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.' ) _lowerCAmelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Union[str, Any] = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : Any = add_prefix_space _lowerCAmelCase : List[Any] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Any = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__ = False , snake_case__ = None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = super().decode( token_ids=snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , **snake_case__ , ) if truncate_before_pattern is not None and len(snake_case__ ) > 0: _lowerCAmelCase : Tuple = self.truncate(snake_case__ , snake_case__ ) return decoded_text def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' def find_re(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : Any = pattern.search(snake_case__ , snake_case__ ) return m.start() if m else -1 _lowerCAmelCase : Any = [re.compile(snake_case__ , re.MULTILINE ) for pattern in truncate_before_pattern] _lowerCAmelCase : Union[str, Any] = list(re.finditer('^print' , snake_case__ , re.MULTILINE ) ) if len(snake_case__ ) > 1: _lowerCAmelCase : Optional[int] = completion[: prints[1].start()] _lowerCAmelCase : int = list(re.finditer('^def' , snake_case__ , re.MULTILINE ) ) if len(snake_case__ ) > 1: _lowerCAmelCase : Dict = completion[: defs[1].start()] _lowerCAmelCase : Any = 0 _lowerCAmelCase : int = [ pos for pos in [find_re(snake_case__ , snake_case__ , snake_case__ ) for terminal in terminals] if pos != -1 ] if len(snake_case__ ) > 0: return completion[: min(snake_case__ )] else: return completion
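# Usage sketch for the decode override above (upstream this class is
# CodeGenTokenizerFast; `truncate_before_pattern` cuts the decoded text at the
# first match of any of the given regexes):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tokenizer("def one():\n    return 1\n\n\ndef two():\n    return 2")["input_ids"]
print(tokenizer.decode(ids, truncate_before_pattern=[r"\n\n\n"]))
# prints only the first function: the second `def` and the blank-line run are dropped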
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the launcher's own arguments plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
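# Illustrative invocation (the training script and its flags are placeholders):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --learning_rate 3e-5
# Everything after the training script is forwarded verbatim through sys.argv,
# with `--tpu_num_cores` appended, before xmp.spawn launches the script's _mp_fn.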
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: lowerCAmelCase : Tuple = None lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""", }, } lowerCAmelCase : Optional[int] = { """camembert-base""": 5_12, } lowerCAmelCase : Tuple = """▁""" class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = CamembertTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=["<s>NOTUSED", "</s>NOTUSED"] , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token super().__init__( snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , additional_special_tokens=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Tuple = vocab_file _lowerCAmelCase : Tuple = False if not self.vocab_file else True def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase : Dict = [self.cls_token_id] _lowerCAmelCase : Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Dict = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
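# Minimal usage sketch for the fast tokenizer above (upstream it is
# CamembertTokenizerFast; `camembert-base` is the real checkpoint name and is
# downloaded on first use):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("camembert-base")
encoding = tokenizer("J'aime le camembert !")
# input_ids are wrapped as <s> ... </s> by build_inputs_with_special_tokens
print(encoding["input_ids"])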
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a Nat (Neighborhood Attention Transformer) model."""

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
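# Hedged sketch: instantiate a randomly initialized Nat model from this config
# (NatModel needs the optional `natten` package at runtime; no weights are
# downloaded here):
from transformers import NatConfig, NatModel

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
model = NatModel(config)
print(config.hidden_size)  # 512 == embed_dim * 2 ** (len(depths) - 1)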
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib lowerCAmelCase : int = get_logger() lowerCAmelCase : Optional[dict] = None class UpperCamelCase__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ): """simple docstring""" def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' super().__init__(features=snake_case__ ) import jax from jaxlib.xla_client import Device if isinstance(snake_case__ , snake_case__ ): raise ValueError( F'Expected {device} to be a `str` not {type(snake_case__ )}, as `jaxlib.xla_extension.Device` ' 'is not serializable neither with `pickle` nor with `dill`. Instead you can surround ' 'the device with `str()` to get its string identifier that will be internally mapped ' 'to the actual `jaxlib.xla_extension.Device`.' ) _lowerCAmelCase : Tuple = device if isinstance(snake_case__ , snake_case__ ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _lowerCAmelCase : List[str] = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( F'Device with string identifier {self.device} not listed among the available ' F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ' F'device: {str(jax.devices()[0] )}.' ) _lowerCAmelCase : Tuple = str(jax.devices()[0] ) _lowerCAmelCase : Optional[int] = jnp_array_kwargs @staticmethod def a ( ): '''simple docstring''' import jax return {str(snake_case__ ): device for device in jax.devices()} def a ( self , snake_case__ ): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(snake_case__ , snake_case__ ) and column: if all( isinstance(snake_case__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(snake_case__ , axis=0 ) return column def a ( self , snake_case__ ): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(snake_case__ , (str, bytes, type(snake_case__ )) ): return value elif isinstance(snake_case__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() _lowerCAmelCase : Tuple = {} if isinstance(snake_case__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: _lowerCAmelCase : Union[str, Any] = {'dtype': jnp.intaa} else: _lowerCAmelCase : Optional[Any] = {'dtype': jnp.intaa} elif isinstance(snake_case__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): _lowerCAmelCase : Optional[Any] = {'dtype': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(snake_case__ , PIL.Image.Image ): _lowerCAmelCase : List[Any] = np.asarray(snake_case__ ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if 
DEVICE_MAPPING is None: _lowerCAmelCase : Union[str, Any] = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(snake_case__ , **{**default_dtype, **self.jnp_array_kwargs} ) def a ( self , snake_case__ ): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(snake_case__ , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(snake_case__ , '__array__' ) and not isinstance(snake_case__ , jax.Array ): _lowerCAmelCase : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(snake_case__ , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] ) elif isinstance(snake_case__ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] ) return self._tensorize(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return map_nested(self._recursive_tensorize , snake_case__ , map_list=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(snake_case__ ) _lowerCAmelCase : List[str] = self.python_features_decoder.decode_row(snake_case__ ) return self.recursive_tensorize(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.numpy_arrow_extractor().extract_column(snake_case__ ) _lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(snake_case__ , pa_table.column_names[0] ) _lowerCAmelCase : int = self.recursive_tensorize(snake_case__ ) _lowerCAmelCase : Dict = self._consolidate(snake_case__ ) return column def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(snake_case__ ) _lowerCAmelCase : Union[str, Any] = self.python_features_decoder.decode_batch(snake_case__ ) _lowerCAmelCase : int = self.recursive_tensorize(snake_case__ ) for column_name in batch: _lowerCAmelCase : List[Any] = self._consolidate(batch[column_name] ) return batch
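# The formatter above is what the public `datasets` API dispatches to for the
# "jax" format (requires `jax` to be installed):
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
ds = ds.with_format("jax")
print(type(ds[0]["x"]))  # rows, columns and batches now come back as jax.Array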
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
'''simple docstring'''
import torch

from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """Sort a sequence of 0s, 1s and 2s in one pass (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
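# Deterministic sanity check alongside the interactive prompt above:
assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]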
'''simple docstring'''
def solution():
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
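# Cross-check of the calendar arithmetic above against the standard library
# (datetime.weekday(): Monday == 0, so Sunday == 6):
import datetime

stdlib_count = sum(
    datetime.date(year, month, 1).weekday() == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
assert stdlib_count == solution()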
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase : Dict = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[Any] = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] lowerCAmelCase : List[str] = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] lowerCAmelCase : Optional[int] = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): lowerCAmelCase : int = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys lowerCAmelCase : Dict = _LazyModule(__name__, 
globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
def solution(limit=1_000_000):
    """Sum Euler's totient function over 2..limit using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert an image to its photographic negative, pixel by pixel."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
25
1
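The negative-image row above maps every channel value v to 255 - v. A minimal runnable sketch of the same loop on a synthetic 2x2 array, assuming only NumPy (the cv2 file I/O from the original is left out):

import numpy as np

def to_negative(img):
    # each BGR channel value v becomes 255 - v, in place
    height, width = img.shape[0], img.shape[1]
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img

sample = np.array([[[0, 0, 0], [255, 255, 255]],
                   [[10, 20, 30], [100, 150, 200]]], dtype=np.uint8)
print(to_negative(sample)[1][1])  # [155 105  55]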
'''simple docstring''' from math import sqrt def lowercase (_A ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5 , int(sqrt(_A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase (_A = 1_0_0_0_1 ): """simple docstring""" _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Optional[Any] = 1 while count != nth and number < 3: number += 1 if is_prime(_A ): count += 1 while count != nth: number += 2 if is_prime(_A ): count += 1 return number if __name__ == "__main__": print(F'''{solution() = }''')
25
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
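A quick sanity check of the 6k +/- 1 trial division used in the prime row above; this standalone sketch mirrors its is_prime logic:

from math import sqrt

def is_prime(number):
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # all remaining candidates are of the form 6k +/- 1
    for i in range(5, int(sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

print([n for n in range(2, 30) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]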
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger() def lowercase (_A , _A , _A , _A , _A = True ): """simple docstring""" print(f'Converting {name}...' ) with torch.no_grad(): if hidden_sizes == 1_2_8: if name[-1] == "S": _lowerCAmelCase : Optional[int] = timm.create_model('levit_128s' , pretrained=_A ) else: _lowerCAmelCase : Tuple = timm.create_model('levit_128' , pretrained=_A ) if hidden_sizes == 1_9_2: _lowerCAmelCase : Dict = timm.create_model('levit_192' , pretrained=_A ) if hidden_sizes == 2_5_6: _lowerCAmelCase : Union[str, Any] = timm.create_model('levit_256' , pretrained=_A ) if hidden_sizes == 3_8_4: _lowerCAmelCase : Dict = timm.create_model('levit_384' , pretrained=_A ) from_model.eval() _lowerCAmelCase : Optional[Any] = LevitForImageClassificationWithTeacher(_A ).eval() _lowerCAmelCase : Optional[int] = OrderedDict() _lowerCAmelCase : int = from_model.state_dict() _lowerCAmelCase : List[Any] = list(from_model.state_dict().keys() ) _lowerCAmelCase : Tuple = list(our_model.state_dict().keys() ) print(len(_A ) , len(_A ) ) for i in range(len(_A ) ): _lowerCAmelCase : List[Any] = weights[og_keys[i]] our_model.load_state_dict(_A ) _lowerCAmelCase : Optional[int] = torch.randn((2, 3, 2_2_4, 2_2_4) ) _lowerCAmelCase : Optional[int] = from_model(_A ) _lowerCAmelCase : Optional[int] = our_model(_A ).logits assert torch.allclose(_A , _A ), "The model logits don't match the original one." 
_lowerCAmelCase : Union[str, Any] = name print(_A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) _lowerCAmelCase : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'Pushed {checkpoint_name}' ) def lowercase (_A , _A = None , _A = True ): """simple docstring""" _lowerCAmelCase : Optional[Any] = 'imagenet-1k-id2label.json' _lowerCAmelCase : List[Any] = 1_0_0_0 _lowerCAmelCase : List[Any] = (1, num_labels) _lowerCAmelCase : str = 'huggingface/label-files' _lowerCAmelCase : Optional[Any] = num_labels _lowerCAmelCase : List[str] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) ) _lowerCAmelCase : Tuple = {int(_A ): v for k, v in idalabel.items()} _lowerCAmelCase : List[str] = idalabel _lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} _lowerCAmelCase : Any = partial(_A , num_labels=_A , idalabel=_A , labelaid=_A ) _lowerCAmelCase : List[Any] = { 'levit-128S': 1_2_8, 'levit-128': 1_2_8, 'levit-192': 1_9_2, 'levit-256': 2_5_6, 'levit-384': 3_8_4, } _lowerCAmelCase : Any = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[1_6, 1_6, 1_6] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads=[4, 8, 1_2] , depths=[4, 4, 4] , key_dim=[1_6, 1_6, 1_6] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_9_2, 2_8_8, 3_8_4] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_5_6, 3_8_4, 5_1_2] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_8_4, 5_1_2, 7_6_8] , num_attention_heads=[6, 9, 1_2] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , _A , names_to_config[model_name] , _A , _A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , _A , _A , _A , _A ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowerCAmelCase : Optional[int] = parser.parse_args() lowerCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
25
'''simple docstring''' from __future__ import annotations from typing import Any def lowercase (_A ): """simple docstring""" if not postfix_notation: return 0 _lowerCAmelCase : int = {'+', '-', '*', '/'} _lowerCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: _lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_A ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
25
1
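The postfix evaluator in the row above pops two operands per operator; a compact restatement with a worked example (the pop order and the sign-aware integer division mirror the original):

def evaluate_postfix(tokens):
    stack = []
    for token in tokens:
        if token in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            elif a * b < 0 and a % b != 0:
                stack.append(a // b + 1)  # truncate the quotient toward zero
            else:
                stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()

print(evaluate_postfix(["5", "3", "2", "*", "-"]))  # 5 - (3 * 2) = -1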
'''simple docstring''' import requests lowerCAmelCase : List[str] = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=""" def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Dict = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['articles'] , 1 ): print(f'{i}.) {article["title"]}' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
25
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mobilenet_v2" def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Union[str, Any] = image_size _lowerCAmelCase : List[Any] = depth_multiplier _lowerCAmelCase : List[Any] = depth_divisible_by _lowerCAmelCase : Optional[Any] = min_depth _lowerCAmelCase : str = expand_ratio _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = first_layer_is_expansion _lowerCAmelCase : int = finegrained_output _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = tf_padding _lowerCAmelCase : Optional[int] = classifier_dropout_prob _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : str = semantic_loss_ignore_index class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = version.parse("1.11" ) @property def a ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' return 1E-4
25
1
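The configuration class in the row above corresponds to MobileNetV2Config in transformers (class names in this dump are placeholder-renamed); a minimal usage sketch, assuming a transformers version that ships MobileNetV2:

from transformers import MobileNetV2Config

# override two of the defaults from the __init__ signature above
config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
print(config.depth_multiplier, config.image_size)  # 0.75 160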
'''simple docstring''' def lowercase (_A ): """simple docstring""" if a < 0: raise ValueError('Input value must be a positive integer' ) elif isinstance(_A , _A ): raise TypeError('Input value must be an \'int\' type' ) return bin(_A ).count('1' ) if __name__ == "__main__": import doctest doctest.testmod()
25
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
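The set-bit counter in the row above boils down to bin(a).count('1'); for example:

# number of 1-bits in the binary representation
for n in (0, 1, 2, 7, 25):
    print(n, bin(n), bin(n).count("1"))
# 25 is 0b11001, so it has three set bits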
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowerCAmelCase : Optional[int] = logging.get_logger(__name__) def lowercase (_A , _A , _A ): """simple docstring""" return [ int(1_0_0_0 * (box[0] / width) ), int(1_0_0_0 * (box[1] / height) ), int(1_0_0_0 * (box[2] / width) ), int(1_0_0_0 * (box[3] / height) ), ] def lowercase (_A , _A , _A = None ): """simple docstring""" _lowerCAmelCase : Any = tesseract_config if tesseract_config is not None else '' # apply OCR _lowerCAmelCase : Union[str, Any] = to_pil_image(_A ) _lowerCAmelCase , _lowerCAmelCase : Tuple = pil_image.size _lowerCAmelCase : int = pytesseract.image_to_data(_A , lang=_A , output_type='dict' , config=_A ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates _lowerCAmelCase : List[Any] = [idx for idx, word in enumerate(_A ) if not word.strip()] _lowerCAmelCase : int = [word for idx, word in enumerate(_A ) if idx not in irrelevant_indices] _lowerCAmelCase : Any = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices] _lowerCAmelCase : Dict = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices] _lowerCAmelCase : str = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices] _lowerCAmelCase : Union[str, Any] = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _lowerCAmelCase : Optional[Any] = [] for x, y, w, h in zip(_A , _A , _A , _A ): _lowerCAmelCase : int = [x, y, x + w, y + h] actual_boxes.append(_A ) # finally, normalize the bounding boxes _lowerCAmelCase : Union[str, Any] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_A , _A , _A ) ) assert len(_A ) == len(_A ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = ["pixel_values"] def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = None , snake_case__ = "" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Dict = size if size is not None else {'height': 224, 'width': 224} _lowerCAmelCase : List[str] = get_size_dict(snake_case__ ) _lowerCAmelCase : Any = do_resize _lowerCAmelCase : Optional[Any] = size _lowerCAmelCase : Optional[int] = resample _lowerCAmelCase : int = apply_ocr _lowerCAmelCase : Optional[int] = ocr_lang _lowerCAmelCase : Any = tesseract_config def a ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[int] = get_size_dict(snake_case__ ) if "height" not in size or "width" not in size: raise 
ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' ) _lowerCAmelCase : str = (size['height'], size['width']) return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase : Optional[Any] = size if size is not None else self.size _lowerCAmelCase : int = get_size_dict(snake_case__ ) _lowerCAmelCase : Dict = resample if resample is not None else self.resample _lowerCAmelCase : Tuple = apply_ocr if apply_ocr is not None else self.apply_ocr _lowerCAmelCase : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang _lowerCAmelCase : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config _lowerCAmelCase : Dict = make_list_of_images(snake_case__ ) if not valid_images(snake_case__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) # All transformations expect numpy arrays. _lowerCAmelCase : int = [to_numpy_array(snake_case__ ) for image in images] if apply_ocr: requires_backends(self , 'pytesseract' ) _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [] for image in images: _lowerCAmelCase , _lowerCAmelCase : Optional[int] = apply_tesseract(snake_case__ , snake_case__ , snake_case__ ) words_batch.append(snake_case__ ) boxes_batch.append(snake_case__ ) if do_resize: _lowerCAmelCase : Union[str, Any] = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) _lowerCAmelCase : Dict = [flip_channel_order(snake_case__ ) for image in images] _lowerCAmelCase : str = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images] _lowerCAmelCase : Optional[Any] = BatchFeature(data={'pixel_values': images} , tensor_type=snake_case__ ) if apply_ocr: _lowerCAmelCase : List[str] = words_batch _lowerCAmelCase : Optional[int] = boxes_batch return data
25
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
25
1
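normalize_box in the row above rescales pixel coordinates to the 0-1000 grid that LayoutLM-style models expect; a standalone check:

def normalize_box(box, width, height):
    # (left, top, right, bottom) in pixels -> 0-1000 relative coordinates
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([50, 100, 150, 200], width=200, height=400))
# [250, 250, 750, 500]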
'''simple docstring''' from __future__ import annotations from typing import Any def lowercase (_A ): """simple docstring""" if not postfix_notation: return 0 _lowerCAmelCase : int = {'+', '-', '*', '/'} _lowerCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: _lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_A ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
25
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup lowerCAmelCase : List[str] = [ """kernels/rwkv/wkv_cuda.cu""", """kernels/rwkv/wkv_op.cpp""", """kernels/deformable_detr/ms_deform_attn.h""", """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""", """models/graphormer/algos_graphormer.pyx""", ] def lowercase (_A ): """simple docstring""" for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""") lowerCAmelCase : Dict = parser.parse_args() if args.check_lib: lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""") lowerCAmelCase : int = Path(transformers_module.__file__).parent else: lowerCAmelCase : int = Path.cwd() / """build/lib/transformers""" if not test_custom_files_are_present(transformers_path): raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
25
1
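The build check in the row above simply asserts that every required relative path exists under a root directory; a hedged sketch of the same idea, exercised against a temporary tree rather than a real package:

from pathlib import Path
from tempfile import TemporaryDirectory

def all_files_present(root, required):
    # every required relative path must exist under the root
    return all((root / rel).exists() for rel in required)

with TemporaryDirectory() as tmp:
    root = Path(tmp)
    (root / "kernels").mkdir()
    (root / "kernels" / "wkv_op.cpp").touch()
    print(all_files_present(root, ["kernels/wkv_op.cpp"]))  # True
    print(all_files_present(root, ["kernels/missing.cu"]))  # False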
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mvp" __magic_name__ = ["past_key_values"] __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Optional[Any] = d_model _lowerCAmelCase : Optional[int] = encoder_ffn_dim _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = encoder_attention_heads _lowerCAmelCase : Any = decoder_ffn_dim _lowerCAmelCase : Optional[Any] = decoder_layers _lowerCAmelCase : int = decoder_attention_heads _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : List[Any] = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Optional[Any] = activation_function _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = encoder_layerdrop _lowerCAmelCase : Union[str, Any] = decoder_layerdrop _lowerCAmelCase : Optional[int] = classifier_dropout _lowerCAmelCase : List[Any] = use_cache _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase : Optional[Any] = use_prompt _lowerCAmelCase : Optional[Any] = prompt_length _lowerCAmelCase : Any = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ): _lowerCAmelCase : Any = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
25
'''simple docstring''' def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = 0 # if input_string is "aba" then new_input_string becomes "a|b|a" _lowerCAmelCase : List[str] = '' _lowerCAmelCase : Any = '' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(_A ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the start and end of the previous furthest-reaching palindromic # substring _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0 # length[i] shows the length of palindromic substring with center i _lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )] # for each character in new_string find the corresponding palindromic string _lowerCAmelCase : Any = 0 for j in range(len(_A ) ): _lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(_A ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 _lowerCAmelCase : List[str] = 2 * k - 1 # does this palindrome end after the previously explored end (that is, r)? # if yes, update r to the last index of this palindrome if j + k - 1 > r: _lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741 _lowerCAmelCase : int = j + k - 1 # update max_length and start position if max_length < length[j]: _lowerCAmelCase : Dict = length[j] _lowerCAmelCase : Optional[int] = j # create that string _lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
25
1
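As a reference for what the Manacher-style routine in the row above computes (the longest palindromic substring), a brute-force check that is O(n^3) but fine for tiny inputs:

def longest_palindrome_bruteforce(s):
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

print(longest_palindrome_bruteforce("abacab"))            # bacab
print(longest_palindrome_bruteforce("forgeeksskeegfor"))  # geeksskeeg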
'''simple docstring''' from typing import Any import numpy as np def lowercase (_A ): """simple docstring""" return np.array_equal(_A , matrix.conjugate().T ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = v.conjugate().T _lowerCAmelCase : Dict = v_star.dot(_A ) assert isinstance(_A , np.ndarray ) return (v_star_dot.dot(_A )) / (v_star.dot(_A )) def lowercase (): """simple docstring""" _lowerCAmelCase : Dict = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] ) _lowerCAmelCase : Tuple = np.array([[1], [2], [3]] ) assert is_hermitian(_A ), f'{a} is not hermitian.' print(rayleigh_quotient(_A , _A ) ) _lowerCAmelCase : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(_A ), f'{a} is not hermitian.' assert rayleigh_quotient(_A , _A ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
25
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
25
1
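For a Hermitian matrix A and an eigenvector v, the Rayleigh quotient v*Av / v*v computed in the row above recovers the corresponding eigenvalue; a small real-symmetric check:

import numpy as np

A = np.array([[2.0, 1.0], [1.0, 2.0]])
v = np.array([[1.0], [1.0]])  # eigenvector of A with eigenvalue 3
quotient = (v.T @ A @ v) / (v.T @ v)
print(quotient.item())  # 3.0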
'''simple docstring''' def lowercase (_A = 1_0_0_0_0_0_0 ): """simple docstring""" _lowerCAmelCase : Any = set(range(3 , _A , 2 ) ) primes.add(2 ) for p in range(3 , _A , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , _A , _A ) ) ) _lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )] for p in primes: for n in range(_A , limit + 1 , _A ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
25
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "trajectory_transformer" __magic_name__ = ["past_key_values"] __magic_name__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = action_weight _lowerCAmelCase : Optional[int] = reward_weight _lowerCAmelCase : Union[str, Any] = value_weight _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Tuple = block_size _lowerCAmelCase : List[Any] = action_dim _lowerCAmelCase : List[Any] = observation_dim _lowerCAmelCase : Union[str, Any] = transition_dim _lowerCAmelCase : Tuple = learning_rate _lowerCAmelCase : int = n_layer _lowerCAmelCase : Any = n_head _lowerCAmelCase : Tuple = n_embd _lowerCAmelCase : Optional[Any] = embd_pdrop _lowerCAmelCase : Union[str, Any] = attn_pdrop _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range _lowerCAmelCase : List[Any] = use_cache super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
25
1
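The totient sieve in the row above multiplies phi[n] by (1 - 1/p) for every prime p dividing n; a tiny version of the same sieve, verified against the known totients of 1..10:

limit = 10
primes = set(range(3, limit, 2))
primes.add(2)
for p in range(3, limit, 2):
    if p in primes:
        primes.difference_update(range(p * p, limit, p))
phi = [float(n) for n in range(limit + 1)]
for p in sorted(primes):
    for n in range(p, limit + 1, p):
        phi[n] *= 1 - 1 / p
print([round(x) for x in phi[1:]])  # [1, 1, 2, 2, 4, 2, 6, 4, 6, 4]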
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "trajectory_transformer" __magic_name__ = ["past_key_values"] __magic_name__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = action_weight _lowerCAmelCase : Optional[int] = reward_weight _lowerCAmelCase : Union[str, Any] = value_weight _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Tuple = block_size _lowerCAmelCase : List[Any] = action_dim _lowerCAmelCase : List[Any] = observation_dim _lowerCAmelCase : Union[str, Any] = transition_dim _lowerCAmelCase : Tuple = learning_rate _lowerCAmelCase : int = n_layer _lowerCAmelCase : Any = n_head _lowerCAmelCase : Tuple = n_embd _lowerCAmelCase : Optional[Any] = embd_pdrop _lowerCAmelCase : Union[str, Any] = attn_pdrop _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range _lowerCAmelCase : List[Any] = use_cache super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
25
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
25
1
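The assertions in the tokenizer tests above pivot on `shift_tokens_right`; a hedged sketch of the mBART-style behavior they check (the final non-pad token, the language id or EOS, is wrapped around to position 0) looks like this. It is an illustration of the contract, not the library's exact code.

import torch


def shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # Wrap the last non-pad token of each row to the front, shifting the rest right by one.
    prev_output_tokens = input_ids.clone()
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = input_ids[:, :-1]
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens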
'''simple docstring'''


def add(first: int, second: int) -> int:
    """simple docstring"""
    # Add without `+`: XOR yields the partial sum, AND the carry; loop until the carry dies out.
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
25
'''simple docstring'''
from math import isqrt


def is_prime(number: int) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """simple docstring"""
    # Candidates 7, 19, 37, 61, ... are the centered hexagonal numbers 3n(n + 1) + 1.
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
25
1
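A quick worked check of the carry identity behind the bitwise adder above: `a + b == (a ^ b) + ((a & b) << 1)`, which is why the loop terminates once the carry reaches zero.

for a, b in [(3, 5), (13, 5), (0, 9), (255, 1)]:
    assert (a ^ b) + ((a & b) << 1) == a + b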
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
25
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mvp" __magic_name__ = ["past_key_values"] __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Optional[Any] = d_model _lowerCAmelCase : Optional[int] = encoder_ffn_dim _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = encoder_attention_heads _lowerCAmelCase : Any = decoder_ffn_dim _lowerCAmelCase : Optional[Any] = decoder_layers _lowerCAmelCase : int = decoder_attention_heads _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : List[Any] = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Optional[Any] = activation_function _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = encoder_layerdrop _lowerCAmelCase : Union[str, Any] = decoder_layerdrop _lowerCAmelCase : Optional[int] = classifier_dropout _lowerCAmelCase : List[Any] = use_cache _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase : Optional[Any] = use_prompt _lowerCAmelCase : Optional[Any] = prompt_length _lowerCAmelCase : Any = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ): _lowerCAmelCase : Any = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
25
1
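A small sketch of the backward-compatibility pattern at the end of the config above: a legacy kwarg is folded into the new attribute with a warning. The function name is illustrative, not a library API.

import warnings


def resolve_forced_bos(bos_token_id, forced_bos_token_id=None, **kwargs):
    # Legacy `force_bos_token_to_be_generated` is mapped onto `forced_bos_token_id`.
    if forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
        warnings.warn(
            "Please make sure the config includes `forced_bos_token_id` in future versions."
        )
        return bos_token_id
    return forced_bos_token_id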
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
25
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
25
1
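The FFN width formula in the conversion script above can be sanity-checked against its own size table: for the 7B model (`dim=4096`, default multipliers) it rounds 8n/3 up to the next multiple of 256 and reproduces 11008.

def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # Round the (scaled) 8n/3 hidden width up to a multiple of `multiple_of`.
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


assert compute_intermediate_size(4096) == 11008  # matches the "7B" entry above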
'''simple docstring'''


def solution(n: int = 100) -> int:
    """simple docstring"""
    collect_powers = set()
    upper_limit = n + 1  # maximum limit
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
25
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = 1 __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None def a ( self ): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
25
1
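A worked check for the distinct-powers count above: with the classic Project Euler example limit of 5, the only collision is 2**4 == 4**2, leaving 15 distinct terms out of 16 pairs.

assert len({a**b for a in range(2, 6) for b in range(2, 6)}) == 15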
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = ["image_processor", "tokenizer"] __magic_name__ = "CLIPImageProcessor" __magic_name__ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , snake_case__ , ) _lowerCAmelCase : Tuple = kwargs.pop('feature_extractor' ) _lowerCAmelCase : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(snake_case__ , snake_case__ ) def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: _lowerCAmelCase : Dict = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: _lowerCAmelCase : Union[str, Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: _lowerCAmelCase : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.tokenizer.model_input_names _lowerCAmelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
25
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
25
1
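A hedged sketch of the dispatch logic in the processor above (the callables are stand-ins for a real tokenizer and image processor): text-only, image-only, and combined inputs each take a different return path.

def process(text=None, images=None, *, tokenizer, image_processor):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = tokenizer(text) if text is not None else {}
    if images is not None:
        image_features = image_processor(images)
        if text is not None:
            # Combined path: merge pixel values into the text encoding.
            encoding["pixel_values"] = image_features["pixel_values"]
            return encoding
        return image_features
    return encoding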
'''simple docstring''' import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = CTRLTokenizer __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _lowerCAmelCase : Optional[Any] = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>'] _lowerCAmelCase : Tuple = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : List[str] = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', ''] _lowerCAmelCase : Optional[int] = {'unk_token': '<unk>'} _lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case__ ) ) def a ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = 'adapt react readapt apt' _lowerCAmelCase : Tuple = 'adapt react readapt apt' return input_text, output_text def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _lowerCAmelCase : List[str] = 'adapt react readapt apt' _lowerCAmelCase : Tuple = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split() _lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : List[str] = tokens + [tokenizer.unk_token] _lowerCAmelCase : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
25
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
25
1
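A minimal sketch of the lazy-import pattern behind `_LazyModule` in the `__init__` above: attribute access triggers the submodule import, so optional backends (torch, TF, Flax) load only on demand. This is an illustration, not the library's exact implementation.

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._symbol_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)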
'''simple docstring'''
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
25
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "nat" __magic_name__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Tuple = embed_dim _lowerCAmelCase : Any = depths _lowerCAmelCase : Dict = len(snake_case__ ) _lowerCAmelCase : str = num_heads _lowerCAmelCase : Dict = kernel_size _lowerCAmelCase : Union[str, Any] = mlp_ratio _lowerCAmelCase : int = qkv_bias _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = drop_path_rate _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Tuple = layer_norm_eps _lowerCAmelCase : Dict = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) _lowerCAmelCase : Any = layer_scale_init_value _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
25
1
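A worked instance of the word-value test above: "SKY" scores 19 + 11 + 25 = 55, which is the 10th triangular number, so it counts toward the total.

TRIANGULAR = {n * (n + 1) // 2 for n in range(1, 101)}
value = sum(ord(ch) - 64 for ch in "SKY")
assert value == 55 and value in TRIANGULAR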
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = DiTPipeline __magic_name__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __magic_name__ = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } __magic_name__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __magic_name__ = False def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Tuple = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case__ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case__ , ) _lowerCAmelCase : Tuple = AutoencoderKL() _lowerCAmelCase : str = DDIMScheduler() _lowerCAmelCase : Optional[Any] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Tuple = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Union[str, Any] = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = 'cpu' _lowerCAmelCase : Tuple = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images _lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase : List[Any] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case__ , 1E-3 ) def a ( self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=snake_case__ , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def a ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = torch.manual_seed(0 ) _lowerCAmelCase : List[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _lowerCAmelCase : int 
= ['vase', 'umbrella', 'white shark', 'white wolf'] _lowerCAmelCase : Dict = pipe.get_label_ids(snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(snake_case__ , generator=snake_case__ , num_inference_steps=40 , output_type='np' ).images for word, image in zip(snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[int] = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) _lowerCAmelCase : Optional[int] = ['vase', 'umbrella'] _lowerCAmelCase : List[Any] = pipe.get_label_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.manual_seed(0 ) _lowerCAmelCase : Dict = pipe(snake_case__ , generator=snake_case__ , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case__ , snake_case__ ): _lowerCAmelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
1
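The special-token layout implemented by `build_inputs_with_special_tokens` in the tokenizer above, as a standalone sketch: `<s> A </s>` for single sequences and `<s> A </s></s> B </s>` for pairs (0 = `<s>`, 2 = `</s>` in the RoBERTa vocabulary).

def build_inputs(token_ids_a, token_ids_b=None, bos_id=0, eos_id=2):
    output = [bos_id] + token_ids_a + [eos_id]
    if token_ids_b is None:
        return output
    # Pair sequences are joined by a doubled separator.
    return output + [eos_id] + token_ids_b + [eos_id]


assert build_inputs([10, 11]) == [0, 10, 11, 2]
assert build_inputs([10, 11], [12]) == [0, 10, 11, 2, 2, 12, 2]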
"""Generic utilities."""

import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager)."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyError if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument. Useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument. Useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
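# --- Usage sketch (not part of the module above) ----------------------------
# A minimal demonstration of the framework-agnostic helpers, exercised with
# NumPy inputs only so that neither torch, TensorFlow nor JAX is required.
# The import path assumes the module lives where it does in upstream
# transformers (`transformers.utils.generic`); adjust it to wherever the file
# above is actually saved.
import numpy as np

from transformers.utils.generic import flatten_dict, to_py_obj, transpose

nested = {"model": {"hidden_size": 32, "heads": 4}, "lr": 1e-3}
print(flatten_dict(nested))  # {'model.hidden_size': 32, 'model.heads': 4, 'lr': 0.001}

batch = {"logits": np.array([[0.1, 0.9]]), "ids": [np.int64(7)]}
print(to_py_obj(batch))  # {'logits': [[0.1, 0.9]], 'ids': [7]} -- plain python objects

print(transpose(np.arange(6).reshape(2, 3)).shape)  # (3, 2)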
25
"""
Pure Python implementation of Dutch national flag (DNF) sort: a single-pass,
in-place sort for sequences that contain only three distinct values.
"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sorts a list of integers containing only the values 0, 1 and 2 in one pass.

    >>> dutch_national_flag_sort([])
    []
    >>> dutch_national_flag_sort([0])
    [0]
    >>> dutch_national_flag_sort([2, 1, 0])
    [0, 1, 2]
    >>> dutch_national_flag_sort([2, 0, 1, 0, 2, 1, 1, 0])
    [0, 0, 0, 1, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
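# --- Usage sketch (not part of the file above) -------------------------------
# A quick check of the single-pass three-way partition: `low` and `high` fence
# off the 0s and 2s while `mid` scans the list exactly once, giving O(n) time
# and O(1) extra space. Assumes `dutch_national_flag_sort` from above is in
# scope. Also exercises the error path, which the doctests do not cover.
print(dutch_national_flag_sort([2, 0, 1, 0, 2, 1, 1, 0]))  # [0, 0, 0, 1, 1, 1, 2, 2]

try:
    dutch_national_flag_sort([0, 3, 1])  # 3 is not one of the flag colors
except ValueError as err:
    print(err)  # The elements inside the sequence must contain only (0, 1, 2) values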
25
1