Dataset schema (for string columns, Min and Max report the minimum and maximum string lengths):

| Column                  | Type   | Min | Max   |
|-------------------------|--------|-----|-------|
| code                    | string | 82  | 54.1k |
| code_codestyle          | int64  | 0   | 699   |
| style_context           | string | 111 | 35.6k |
| style_context_codestyle | int64  | 0   | 699   |
| label                   | int64  | 0   | 1     |
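Each row pairs a `code` string with a `style_context` string, an integer codestyle id for each, and a binary `label`. As a minimal sketch of reading such a split with the `datasets` library — assuming the rows are hosted on the Hugging Face Hub; the id `your-org/code-style-pairs` and the `train` split name are hypothetical placeholders, since the source does not name the dataset:

```python
# A minimal sketch, assuming the rows are hosted on the Hugging Face Hub.
# "your-org/code-style-pairs" is a hypothetical placeholder id, not the real one.
from datasets import load_dataset

ds = load_dataset("your-org/code-style-pairs", split="train")
for row in ds.select(range(3)):
    print(
        len(row["code"]),                # string length; 82 to 54.1k in this schema
        row["code_codestyle"],           # int64 codestyle id, 0 to 699
        row["style_context_codestyle"],  # int64 codestyle id of the context
        row["label"],                    # binary label, 0 or 1
    )
```

The preview rows below are listed one field at a time.

Row 1

code: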
'''simple docstring''' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): return [sentence[i : i + ngram_size] for i in range(len(__lowerCAmelCase ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 50

style_context:
'''simple docstring''' def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ): return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 50
label: 1

Row 2

code:
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Any = { 'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json', 'Salesforce/blip-vqa-capfit-large': ( 'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-base': ( 'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-large': ( 'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json' ), 'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json', 'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json', 'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json', 'Salesforce/blip-itm-large-flikr': ( 'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json' ), } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'blip_text_model' def __init__( self ,_lowerCAmelCase=3_05_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=8 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0 ,_lowerCAmelCase=1_02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): super().__init__( pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,sep_token_id=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = encoder_hidden_size lowerCamelCase__ = intermediate_size lowerCamelCase__ = projection_dim lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = hidden_act lowerCamelCase__ = initializer_range lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = is_decoder lowerCamelCase__ = use_cache @classmethod def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ): cls._set_token_in_kwargs(_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get("""model_type""" ) == "blip": lowerCamelCase__ = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'blip_vision_model' def __init__( self ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=3_84 ,_lowerCAmelCase=16 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-10 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = hidden_size lowerCamelCase__ = intermediate_size lowerCamelCase__ = projection_dim lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = patch_size lowerCamelCase__ = image_size lowerCamelCase__ = initializer_range lowerCamelCase__ = attention_dropout lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = hidden_act @classmethod def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ): cls._set_token_in_kwargs(_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get("""model_type""" ) == "blip": lowerCamelCase__ = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'blip' _UpperCamelCase = True def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2.6592 ,_lowerCAmelCase=2_56 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) if text_config is None: lowerCamelCase__ = {} logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" ) if vision_config is None: lowerCamelCase__ = {} logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" ) lowerCamelCase__ = BlipTextConfig(**_lowerCAmelCase ) lowerCamelCase__ = BlipVisionConfig(**_lowerCAmelCase ) lowerCamelCase__ = self.vision_config.hidden_size lowerCamelCase__ = projection_dim lowerCamelCase__ = logit_scale_init_value lowerCamelCase__ = 1.0 lowerCamelCase__ = 0.02 lowerCamelCase__ = image_text_hidden_size @classmethod def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = copy.deepcopy(self.__dict__ ) lowerCamelCase__ = self.text_config.to_dict() lowerCamelCase__ = self.vision_config.to_dict() lowerCamelCase__ = self.__class__.model_type return output
code_codestyle: 50

style_context:
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 50
label: 1

Row 3

code:
'''simple docstring''' import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = num_patches + 1 def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMSNConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = ViTMSNModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.type_sequence_label_size lowerCamelCase__ = ViTMSNForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = ViTMSNForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () _UpperCamelCase = ( {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = ViTMSNModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,nn.Linear ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = ViTMSNModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): torch.manual_seed(2 ) lowerCamelCase__ = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_lowerCAmelCase ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
code_codestyle: 50

style_context:
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase : int = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
style_context_codestyle: 50
label: 1

Row 4

code:
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['input_features', 'attention_mask'] def __init__( self ,_lowerCAmelCase=80 ,_lowerCAmelCase=1_60_00 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=10 ,_lowerCAmelCase=25 ,_lowerCAmelCase="hamming_window" ,_lowerCAmelCase=3_2768.0 ,_lowerCAmelCase=0.97 ,_lowerCAmelCase=1.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): super().__init__(feature_size=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,padding_value=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = feature_size lowerCamelCase__ = sampling_rate lowerCamelCase__ = padding_value lowerCamelCase__ = hop_length lowerCamelCase__ = win_length lowerCamelCase__ = frame_signal_scale lowerCamelCase__ = preemphasis_coeff lowerCamelCase__ = mel_floor lowerCamelCase__ = normalize_means lowerCamelCase__ = normalize_vars lowerCamelCase__ = win_function lowerCamelCase__ = return_attention_mask lowerCamelCase__ = win_length * sampling_rate // 10_00 lowerCamelCase__ = hop_length * sampling_rate // 10_00 lowerCamelCase__ = optimal_fft_length(self.sample_size ) lowerCamelCase__ = (self.n_fft // 2) + 1 def UpperCamelCase_ ( self ,_lowerCAmelCase ): if self.win_function == "hamming_window": lowerCamelCase__ = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=_lowerCAmelCase ) else: lowerCamelCase__ = window_function(window_length=self.sample_size ,name=self.win_function ) lowerCamelCase__ = mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) lowerCamelCase__ = spectrogram( one_waveform * self.frame_signal_scale ,window=_lowerCAmelCase ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=_lowerCAmelCase ,preemphasis=self.preemphasis_coeff ,mel_filters=_lowerCAmelCase ,mel_floor=self.mel_floor ,log_mel="""log""" ,) return msfc_features.T def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make sure we normalize float32 arrays if self.normalize_means: lowerCamelCase__ = x[:input_length].mean(axis=0 ) lowerCamelCase__ = np.subtract(_lowerCAmelCase ,_lowerCAmelCase ) if self.normalize_vars: lowerCamelCase__ = x[:input_length].std(axis=0 ) lowerCamelCase__ = np.divide(_lowerCAmelCase ,_lowerCAmelCase ) if input_length < x.shape[0]: lowerCamelCase__ = padding_value # make sure array is in float32 lowerCamelCase__ = x.astype(np.floataa ) return x def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(_lowerCAmelCase ,_lowerCAmelCase ,self.padding_value ) for x, n in zip(_lowerCAmelCase ,_lowerCAmelCase )] def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase__ = isinstance(_lowerCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowerCamelCase__ = is_batched_numpy or ( isinstance(_lowerCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCAmelCase ,np.ndarray ): lowerCamelCase__ = np.asarray(_lowerCAmelCase ,dtype=np.floataa ) elif isinstance(_lowerCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ = [raw_speech] # extract fbank features lowerCamelCase__ = [self._extract_mfsc_features(_lowerCAmelCase ) for one_waveform in raw_speech] # convert into correct format for padding lowerCamelCase__ = BatchFeature({"""input_features""": features} ) lowerCamelCase__ = self.pad( _lowerCAmelCase ,padding=_lowerCAmelCase ,max_length=_lowerCAmelCase ,truncation=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,**_lowerCAmelCase ,) # make sure list is in array format lowerCamelCase__ = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] ,_lowerCAmelCase ): lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for feature in input_features] lowerCamelCase__ = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: lowerCamelCase__ = ( np.array(_lowerCAmelCase ,dtype=np.intaa ) if self._get_padding_strategies(_lowerCAmelCase ,max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) lowerCamelCase__ = self.normalize( padded_inputs["""input_features"""] ,attention_mask=_lowerCAmelCase ) if return_tensors is not None: lowerCamelCase__ = padded_inputs.convert_to_tensors(_lowerCAmelCase ) return padded_inputs
code_codestyle: 50

style_context:
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
style_context_codestyle: 50
label: 1

Row 5

code:
'''simple docstring''' def A__ ( __lowerCAmelCase : str ): assert column_title.isupper() lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 lowerCamelCase__ = 0 while index >= 0: lowerCamelCase__ = (ord(column_title[index] ) - 64) * pow(26 , __lowerCAmelCase ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 50

style_context:
'''simple docstring''' from PIL import Image def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ): def brightness(__lowerCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
style_context_codestyle: 50
label: 1

Row 6

code:
'''simple docstring''' from sklearn.metrics import fa_score import datasets UpperCamelCase : Tuple = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' UpperCamelCase : str = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' UpperCamelCase : Union[str, Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ (datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=1 ,_lowerCAmelCase="binary" ,_lowerCAmelCase=None ): lowerCamelCase__ = fa_score( _lowerCAmelCase ,_lowerCAmelCase ,labels=_lowerCAmelCase ,pos_label=_lowerCAmelCase ,average=_lowerCAmelCase ,sample_weight=_lowerCAmelCase ) return {"f1": float(_lowerCAmelCase ) if score.size == 1 else score}
code_codestyle: 50

style_context:
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
style_context_codestyle: 50
label: 1

Row 7

code:
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 50

style_context:
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
style_context_codestyle: 50
label: 1

Row 8

code:
'''simple docstring''' def A__ ( __lowerCAmelCase : int = 1000 ): return sum(e for e in range(3 , __lowerCAmelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F'{solution() = }')
code_codestyle: 50

style_context:
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
style_context_codestyle: 50
label: 1

Row 9

code:
'''simple docstring''' from math import sqrt def A__ ( __lowerCAmelCase : int = 100_0000 ): lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowerCAmelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F'{solution() = }')
code_codestyle: 50

style_context:
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
style_context_codestyle: 50
label: 1

Row 10

code:
'''simple docstring''' from PIL import Image def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ): def brightness(__lowerCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
code_codestyle: 50

style_context:
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
50
1
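For illustration, a minimal standalone sketch of the refinenet-to-fusion-stage index remapping used by the renaming helper in the DPT conversion script above; the abs(layer_idx - 4) trick maps 4 to 0, 3 to 1, 2 to 2 and 1 to 3. The key names here are hypothetical, only the remapping logic comes from the script:

# plain Python, no transformers required
for layer_idx in (1, 2, 3, 4):
    name = f"neck.refinenet{layer_idx}.out_conv"  # hypothetical key for illustration
    renamed = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx - 4)}")
    print(name, "->", renamed)  # e.g. neck.refinenet4.out_conv -> neck.fusion_stage.layers.0.out_conv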
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = (DEISMultistepScheduler,) _UpperCamelCase = (('num_inference_steps', 25),) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, } config.update(**_lowerCAmelCase ) return config def UpperCamelCase_ ( self ,_lowerCAmelCase=0 ,**_lowerCAmelCase ): lowerCamelCase__ = dict(self.forward_default_kwargs ) lowerCamelCase__ = kwargs.pop("""num_inference_steps""" ,_lowerCAmelCase ) lowerCamelCase__ = self.dummy_sample lowerCamelCase__ = 0.1 * sample lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) lowerCamelCase__ = scheduler_class.from_pretrained(_lowerCAmelCase ) new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCamelCase__ , lowerCamelCase__ = sample, sample for t in range(_lowerCAmelCase ,time_step + scheduler.config.solver_order + 1 ): lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample lowerCamelCase__ = new_scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ,_lowerCAmelCase=0 ,**_lowerCAmelCase ): lowerCamelCase__ = dict(self.forward_default_kwargs ) lowerCamelCase__ = kwargs.pop("""num_inference_steps""" ,_lowerCAmelCase ) lowerCamelCase__ = self.dummy_sample lowerCamelCase__ = 0.1 * sample lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) lowerCamelCase__ = scheduler_class.from_pretrained(_lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample lowerCamelCase__ = new_scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self ,_lowerCAmelCase=None 
,**_lowerCAmelCase ): if scheduler is None: lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = 10 lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample return sample def UpperCamelCase_ ( self ): lowerCamelCase__ = dict(self.forward_default_kwargs ) lowerCamelCase__ = kwargs.pop("""num_inference_steps""" ,_lowerCAmelCase ) for scheduler_class in self.scheduler_classes: lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = self.dummy_sample lowerCamelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_lowerCAmelCase ,"""set_timesteps""" ): scheduler.set_timesteps(_lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(_lowerCAmelCase ,"""set_timesteps""" ): lowerCamelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10] lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order] lowerCamelCase__ = scheduler.timesteps[5] lowerCamelCase__ = scheduler.timesteps[6] lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def UpperCamelCase_ ( self ): # make sure that iterating over schedulers with same config names gives same results # for defaults lowerCamelCase__ = DEISMultistepScheduler(**self.get_scheduler_config() ) lowerCamelCase__ = self.full_loop(scheduler=_lowerCAmelCase ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 lowerCamelCase__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowerCamelCase__ = UniPCMultistepScheduler.from_config(scheduler.config ) lowerCamelCase__ = DEISMultistepScheduler.from_config(scheduler.config ) lowerCamelCase__ = self.full_loop(scheduler=_lowerCAmelCase ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def UpperCamelCase_ ( self ): for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.check_over_configs(thresholding=_lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,sample_max_value=_lowerCAmelCase ,algorithm_type="""deis""" ,solver_order=_lowerCAmelCase ,solver_type=_lowerCAmelCase ,) def UpperCamelCase_ ( self ): for prediction_type in ["epsilon", "v_prediction"]: 
self.check_over_configs(prediction_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowerCAmelCase ,solver_type=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,algorithm_type=_lowerCAmelCase ,) lowerCamelCase__ = self.full_loop( solver_order=_lowerCAmelCase ,solver_type=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,algorithm_type=_lowerCAmelCase ,) assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers" def UpperCamelCase_ ( self ): self.check_over_configs(lower_order_final=_lowerCAmelCase ) self.check_over_configs(lower_order_final=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=_lowerCAmelCase ,time_step=0 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop() lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop(prediction_type="""v_prediction""" ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.091 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(thresholding=_lowerCAmelCase ,dynamic_thresholding_ratio=0 ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = 10 lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
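The scheduler-interchange pattern exercised by the test above reduces to a minimal sketch: each multistep scheduler can be rebuilt from another's config. This assumes diffusers is installed; the value below is an illustrative default:

from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler

deis = DEISMultistepScheduler(num_train_timesteps=1000)
# from_config reads the shared config dict, so the scheduler classes are interchangeable
unipc = UniPCMultistepScheduler.from_config(deis.config)
deis_again = DEISMultistepScheduler.from_config(unipc.config)
print(deis_again.config.num_train_timesteps)  # 1000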
'''simple docstring''' def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ): # "extended trapezoidal rule" # int(f) = dx/2 * (f0 + 2*f1 + 2*f2 + ... + 2*f(n-1) + fn) lowerCamelCase__ = (boundary[1] - boundary[0]) / steps lowerCamelCase__ = boundary[0] lowerCamelCase__ = boundary[1] lowerCamelCase__ = make_points(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = 0.0 y += (h / 2.0) * f(__lowerCAmelCase ) for i in x_i: # print(i) y += h * f(__lowerCAmelCase ) y += (h / 2.0) * f(__lowerCAmelCase ) return y def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str ): lowerCamelCase__ = a + h while x <= (b - h): yield x lowerCamelCase__ = x + h def A__ ( __lowerCAmelCase : Dict ): # enter your function here lowerCamelCase__ = (x - 0) * (x - 0) return y def A__ ( ): lowerCamelCase__ = 0.0 # Lower bound of integration lowerCamelCase__ = 1.0 # Upper bound of integration lowerCamelCase__ = 10.0 # define number of steps or resolution lowerCamelCase__ = [a, b] # define boundary of integration lowerCamelCase__ = method_a(__lowerCAmelCase , __lowerCAmelCase ) print(F'''y = {y}''' ) if __name__ == "__main__": main()
50
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
50
1
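As a quick numerical check of the composite trapezoidal rule implemented above, a self-contained sketch using the same setup as the program's main (f(x) = x**2 on [0, 1] with 10 steps):

def f(x):
    return x * x  # same integrand as above

a, b, steps = 0.0, 1.0, 10
h = (b - a) / steps
# endpoints weighted by h/2, the interior points x1 .. x(n-1) by h
y = (h / 2.0) * (f(a) + f(b)) + h * sum(f(a + i * h) for i in range(1, steps))
print(y)  # 0.335, close to the exact integral 1/3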
'''simple docstring''' def A__ ( __lowerCAmelCase : int ): if divisor % 5 == 0 or divisor % 2 == 0: return 0 lowerCamelCase__ = 1 lowerCamelCase__ = 1 while repunit: lowerCamelCase__ = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def A__ ( __lowerCAmelCase : int = 100_0000 ): lowerCamelCase__ = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(__lowerCAmelCase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F'{solution() = }')
50
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
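A quick spot-check of the repunit recurrence above: since R(k+1) = 10*R(k) + 1, only the remainder modulo the divisor has to be tracked, and the first repunit divisible by 7 is R(6):

repunit, k = 1, 1
while repunit % 7:               # stop at the first repunit divisible by 7
    repunit = 10 * repunit + 1
    k += 1
print(k, repunit, repunit // 7)  # 6 111111 15873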
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = [] for line in lines: lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase ) # remove comments if line: filtered_lines.append(__lowerCAmelCase ) lowerCamelCase__ = """\n""".join(__lowerCAmelCase ) # Make a hash from all this code lowerCamelCase__ = full_str.encode("""utf-8""" ) return shaaaa(__lowerCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCamelCase : Dict = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase : str = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCamelCase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
50
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(__lowerCAmelCase , torch.Tensor ): raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" ) lowerCamelCase__ = v.half() if save_path is None: # overwrite src_path lowerCamelCase__ = src_path torch.save(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": fire.Fire(convert)
50
1
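The module-hashing helper above strips comments and blank lines before hashing, so purely cosmetic edits do not invalidate the cache. A minimal sketch of the same idea (the digest is assumed here to be SHA-256; any stable hash works):

import re
from hashlib import sha256

def hash_python_lines(lines):
    filtered = [re.sub(r"#.*", "", line) for line in lines]  # drop comments
    filtered = [line for line in filtered if line]           # drop blank lines
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

print(hash_python_lines(["x = 1  # a comment", "", "y = 2"])
      == hash_python_lines(["x = 1  # another comment", "y = 2"]))  # True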
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase : Optional[int] = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'beit' def __init__( self ,_lowerCAmelCase=81_92 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=16 ,_lowerCAmelCase=3 ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=True ,_lowerCAmelCase=[3, 5, 7, 11] ,_lowerCAmelCase=[1, 2, 3, 6] ,_lowerCAmelCase=True ,_lowerCAmelCase=0.4 ,_lowerCAmelCase=2_56 ,_lowerCAmelCase=1 ,_lowerCAmelCase=False ,_lowerCAmelCase=2_55 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = use_mask_token lowerCamelCase__ = use_absolute_position_embeddings lowerCamelCase__ = use_relative_position_bias lowerCamelCase__ = use_shared_relative_position_bias lowerCamelCase__ = layer_scale_init_value lowerCamelCase__ = drop_path_rate lowerCamelCase__ = use_mean_pooling # decode head attributes (semantic segmentation) lowerCamelCase__ = out_indices lowerCamelCase__ = pool_scales # auxiliary head attributes (semantic segmentation) lowerCamelCase__ = use_auxiliary_head lowerCamelCase__ = auxiliary_loss_weight lowerCamelCase__ = auxiliary_channels lowerCamelCase__ = auxiliary_num_convs lowerCamelCase__ = auxiliary_concat_input lowerCamelCase__ = semantic_loss_ignore_index class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = version.parse('1.11' ) @property def UpperCamelCase_ ( self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCamelCase_ ( self ): return 1E-4
50
'''simple docstring''' import os from pathlib import Path def A__ ( ): from torch.utils.cpp_extension import load lowerCamelCase__ = Path(__lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" lowerCamelCase__ = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , __lowerCAmelCase , with_cuda=__lowerCAmelCase , extra_include_paths=[str(__lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
50
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['note_seq'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""note_seq"""] )
50
'''simple docstring''' def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ): lowerCamelCase__ = len(__lowerCAmelCase ) print("""The following activities are selected:""" ) # The first activity is always selected lowerCamelCase__ = 0 print(__lowerCAmelCase , end=""",""" ) # Consider rest of the activities for j in range(__lowerCAmelCase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(__lowerCAmelCase , end=""",""" ) lowerCamelCase__ = j if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5] UpperCamelCase : int = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
50
1
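For reference, the greedy rule in the activity-selection program above — keep an activity iff its start time is at least the finish time of the last one kept — selects indices 0, 1, 3 and 4 on the sample data:

start = [1, 3, 0, 5, 8, 5]    # sample data from above, assumed sorted by finish time
finish = [2, 4, 6, 7, 9, 9]
selected = [0]                # the first activity is always selected
for j in range(1, len(start)):
    if start[j] >= finish[selected[-1]]:
        selected.append(j)
print(selected)               # [0, 1, 3, 4]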
'''simple docstring''' from __future__ import annotations def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : list[str] | None = None ): lowerCamelCase__ = word_bank or [] # create a table lowerCamelCase__ = len(__lowerCAmelCase ) + 1 lowerCamelCase__ = [] for _ in range(__lowerCAmelCase ): table.append([] ) # seed value lowerCamelCase__ = [[]] # because the empty string has one (empty) combination # iterate through the indices for i in range(__lowerCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(__lowerCAmelCase )] == word: lowerCamelCase__ = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now, push that combination to table[i+len(word)] table[i + len(__lowerCAmelCase )] += new_combinations # combinations are built in reverse order, so reverse each for better output for combination in table[len(__lowerCAmelCase )]: combination.reverse() return table[len(__lowerCAmelCase )] if __name__ == "__main__": print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa'])) print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't'])) print( all_construct( 'hexagonosaurus', ['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'], ) )
50
'''simple docstring''' import warnings from ..trainer import Trainer from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase=None ,**_lowerCAmelCase ): warnings.warn( """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """ """instead.""" ,_lowerCAmelCase ,) super().__init__(args=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : List[str] = { 'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'mra' def __init__( self ,_lowerCAmelCase=5_02_65 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=1 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase=4 ,_lowerCAmelCase="full" ,_lowerCAmelCase=0 ,_lowerCAmelCase=0 ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,_lowerCAmelCase=2 ,**_lowerCAmelCase ,): super().__init__(pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = vocab_size lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = type_vocab_size lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = position_embedding_type lowerCamelCase__ = block_per_row lowerCamelCase__ = approx_mode lowerCamelCase__ = initial_prior_first_n_blocks lowerCamelCase__ = initial_prior_diagonal_n_blocks
50
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = [] for line in lines: lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase ) # remove comments if line: filtered_lines.append(__lowerCAmelCase ) lowerCamelCase__ = """\n""".join(__lowerCAmelCase ) # Make a hash from all this code lowerCamelCase__ = full_str.encode("""utf-8""" ) return shaaaa(__lowerCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCamelCase : Dict = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase : str = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCamelCase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
50
1
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch UpperCamelCase : Dict = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['pixel_values'] def __init__( self ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = PILImageResampling.BILINEAR ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,_lowerCAmelCase = 1 / 2_55 ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 2_56} lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase ) lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,param_name="""crop_size""" ) lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = resample lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_rescale lowerCamelCase__ = rescale_factor lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = PILImageResampling.BICUBIC ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowerCamelCase__ = get_resize_output_image_size(_lowerCAmelCase ,size=size["""shortest_edge"""] ,default_to_square=_lowerCAmelCase ) return resize(_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): lowerCamelCase__ = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(_lowerCAmelCase ,size=(size["""height"""], size["""width"""]) ,data_format=_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ): return rescale(_lowerCAmelCase ,scale=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): return normalize(_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = ChannelDimension.FIRST ,**_lowerCAmelCase ,): lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize lowerCamelCase__ = size if size is not None else self.size lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase ) lowerCamelCase__ = resample if resample is not None else self.resample lowerCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase__ = crop_size if crop_size is not None else self.crop_size lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,param_name="""crop_size""" ) lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase__ = image_mean if image_mean is not None else self.image_mean lowerCamelCase__ = image_std if image_std is not None else self.image_std lowerCamelCase__ = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
lowerCamelCase__ = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: lowerCamelCase__ = [self.resize(image=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ) for image in images] if do_center_crop: lowerCamelCase__ = [self.center_crop(image=_lowerCAmelCase ,size=_lowerCAmelCase ) for image in images] if do_rescale: lowerCamelCase__ = [self.rescale(image=_lowerCAmelCase ,scale=_lowerCAmelCase ) for image in images] if do_normalize: lowerCamelCase__ = [self.normalize(image=_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ) for image in images] lowerCamelCase__ = [to_channel_dimension_format(_lowerCAmelCase ,_lowerCAmelCase ) for image in images] lowerCamelCase__ = {"""pixel_values""": images} return BatchFeature(data=_lowerCAmelCase ,tensor_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_lowerCAmelCase ) != len(_lowerCAmelCase ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_lowerCAmelCase ): lowerCamelCase__ = target_sizes.numpy() lowerCamelCase__ = [] for idx in range(len(_lowerCAmelCase ) ): lowerCamelCase__ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=_lowerCAmelCase ) lowerCamelCase__ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_lowerCAmelCase ) else: lowerCamelCase__ = logits.argmax(dim=1 ) lowerCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
50
'''simple docstring''' import operator def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ): lowerCamelCase__ = operator.lt if reverse else operator.gt lowerCamelCase__ = solution or [] if not arr: return solution lowerCamelCase__ = [arr.pop(0 )] for i, item in enumerate(__lowerCAmelCase ): if _operator(__lowerCAmelCase , sublist[-1] ): sublist.append(__lowerCAmelCase ) arr.pop(__lowerCAmelCase ) # merging sublist into solution list if not solution: solution.extend(__lowerCAmelCase ) else: while sublist: lowerCamelCase__ = sublist.pop(0 ) for i, xx in enumerate(__lowerCAmelCase ): if not _operator(__lowerCAmelCase , __lowerCAmelCase ): solution.insert(__lowerCAmelCase , __lowerCAmelCase ) break else: solution.append(__lowerCAmelCase ) strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
50
1
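A minimal sketch of the first strand-extraction pass inside strand_sort above: one increasing subsequence (a "strand") is peeled off the input, and later passes merge the remainder; duplicate elements are ignored here for simplicity:

arr = [4, 3, 5, 1, 2]       # same sample as the asserts above
strand = [arr.pop(0)]       # a strand always starts with the first element
for item in arr[:]:         # iterate over a copy while mutating arr
    if item > strand[-1]:   # ascending case, i.e. operator.gt above
        strand.append(item)
        arr.remove(item)
print(strand, arr)          # [4, 5] [3, 1, 2]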
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
50
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A__ ( __lowerCAmelCase : dict ): return (data["data"], data["target"]) def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ): lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(__lowerCAmelCase , __lowerCAmelCase ) # Predict target for test data lowerCamelCase__ = xgb.predict(__lowerCAmelCase ) lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 ) return predictions def A__ ( ): lowerCamelCase__ = fetch_california_housing() lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split( __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 ) lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Error printing print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
50
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCAmelCase ,"""hidden_sizes""" ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase ,"""neck_hidden_sizes""" ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase ,"""num_attention_heads""" ) ) class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=6_40 ,_lowerCAmelCase=4 ,_lowerCAmelCase="silu" ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=10 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = last_hidden_size lowerCamelCase__ = num_attention_heads lowerCamelCase__ = hidden_act lowerCamelCase__ = conv_kernel_size lowerCamelCase__ = output_stride lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = classifier_dropout_prob lowerCamelCase__ = use_labels lowerCamelCase__ = is_training lowerCamelCase__ = num_labels lowerCamelCase__ = initializer_range lowerCamelCase__ = scope def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): return MobileViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = MobileViTModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, 
self.image_size // self.output_stride, ) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = MobileViTForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = MobileViTForSemanticSegmentation(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) _UpperCamelCase = ( { 'feature-extraction': MobileViTModel, 'image-classification': MobileViTForImageClassification, 'image-segmentation': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTModelTester(self ) lowerCamelCase__ = MobileViTConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViT does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""MobileViT does not support input and output embeddings""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""MobileViT does not output attentions""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): def check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = 
model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.hidden_states lowerCamelCase__ = 5 self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ = 2 for i in range(len(_lowerCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,) divisor *= 2 self.assertEqual(self.model_tester.output_stride ,divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = MobileViTModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(_lowerCAmelCase ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) lowerCamelCase__ = model.to(_lowerCAmelCase ) lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) lowerCamelCase__ = outputs.logits # verify the logits lowerCamelCase__ = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], 
[-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] ,device=_lowerCAmelCase ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_lowerCAmelCase ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) lowerCamelCase__ = model.to(_lowerCAmelCase ) lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) lowerCamelCase__ = outputs.logits.detach().cpu() lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ,target_sizes=[(50, 60)] ) lowerCamelCase__ = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape ,_lowerCAmelCase ) lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ) lowerCamelCase__ = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape ,_lowerCAmelCase )
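The hidden-state checks in the MobileViT test above hinge on one invariant: each of the five feature maps halves the spatial side of its predecessor until the configured output stride is reached. A minimal plain-Python sketch of that arithmetic, assuming the tester's image_size=32 and output_stride=32:

# Sketch of the spatial-size progression asserted in check_hidden_states_output.
# image_size=32 and output_stride=32 are taken from the model tester above.
image_size = 32
output_stride = 32
num_hidden_states = 5  # MobileViT emits five feature maps in these tests

divisor = 2
for i in range(num_hidden_states):
    side = image_size // divisor  # height == width at every stage
    print(f"hidden state {i}: {side}x{side}")
    divisor *= 2

# The loop leaves divisor == 64, so divisor // 2 recovers the output stride,
# which is exactly what the final assertEqual in the test verifies.
assert output_stride == divisor // 2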
50
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
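The top-p warper exercised above implements nucleus filtering: keep the smallest set of tokens whose softmax mass reaches top_p and mask everything else to the filter value. A self-contained NumPy sketch of that rule (an illustration of the idea only, not the Flax implementation; the epsilon is my addition to make the exact-boundary case deterministic):

import numpy as np

def top_p_filter(logits: np.ndarray, top_p: float, filter_value: float = -np.inf) -> np.ndarray:
    """Mask every logit outside the smallest set whose softmax mass reaches top_p."""
    probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    order = np.argsort(-probs, axis=-1)                       # descending by probability
    sorted_probs = np.take_along_axis(probs, order, axis=-1)
    cumulative = np.cumsum(sorted_probs, axis=-1)
    # Drop a token once the mass *before* it already reached top_p; the small
    # epsilon makes an exact boundary such as 0.5 + 0.3 vs 0.8 count as reached.
    sorted_drop = (cumulative - sorted_probs) >= top_p - 1e-12
    drop = np.empty_like(sorted_drop)
    np.put_along_axis(drop, order, sorted_drop, axis=-1)      # undo the sort
    return np.where(drop, filter_value, logits)

scores = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
print(np.exp(top_p_filter(scores, 0.8)))
# row 0 keeps 0.5 and 0.3; row 1 keeps 0.3, 0.3 and 0.25 -- the same filtered
# distribution the first assertion in the top-p test above expects.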
50
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = RobertaTokenizer _UpperCamelCase = RobertaTokenizerFast _UpperCamelCase = True _UpperCamelCase = {'cls_token': '<s>'} def UpperCamelCase_ ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCamelCase__ = dict(zip(_lowerCAmelCase ,range(len(_lowerCAmelCase ) ) ) ) lowerCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCamelCase__ = {"""unk_token""": """<unk>"""} lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_lowerCAmelCase ) ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """lower newer""" lowerCamelCase__ = """lower newer""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowerCamelCase__ = tokenizer.tokenize(_lowerCAmelCase ) # , add_prefix_space=True) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = tokens + [tokenizer.unk_token] lowerCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" ,add_special_tokens=_lowerCAmelCase ) ,[0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" ,add_special_tokens=_lowerCAmelCase ) ,[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] ,) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" ) lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode( """sequence builders""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode( """sequence builders""" ,"""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = """Encode this sequence.""" lowerCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_lowerCAmelCase ,_lowerCAmelCase ) # Testing spaces after special tokens lowerCamelCase__ = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase )} ) # mask token has a left space lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) lowerCamelCase__ = """Encode <mask> sequence""" lowerCamelCase__ = """Encode <mask>sequence""" lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ) lowerCamelCase__ = encoded.index(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ) lowerCamelCase__ = encoded.index(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = self.tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = """A, <mask> AllenNLP sentence.""" lowerCamelCase__ = tokenizer_r.encode_plus(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.encode_plus(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase 
,return_token_type_ids=_lowerCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,) lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( _lowerCAmelCase ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( _lowerCAmelCase ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def UpperCamelCase_ ( self ): for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ): lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] ,_lowerCAmelCase ) self.assertEqual(post_processor_state["""add_prefix_space"""] ,_lowerCAmelCase ) self.assertEqual(post_processor_state["""trim_offsets"""] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` lowerCamelCase__ = F'''{text_of_1_token} {text_of_1_token}''' lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,) lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,) lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = 
tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,) lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,) lowerCamelCase__ = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(_lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(_lowerCAmelCase ) + 1, 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,) lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(_lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(_lowerCAmelCase ), 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,) lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,use_fast=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_r(_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(_lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(_lowerCAmelCase ), 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) ,)
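The setUp fixture of this tokenizer test boils down to three steps: dump a toy byte-level vocabulary to vocab.json, dump the BPE merge rules to merges.txt, and point the slow tokenizer at both files. A minimal standalone sketch of the same flow, assuming a local transformers install and reusing the fixture's vocabulary so "lower newer" splits into the sub-words asserted in the first test:

import json
import os
import tempfile

from transformers import RobertaTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
         "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
         "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.json")
    merges_file = os.path.join(tmp, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump(dict(zip(vocab, range(len(vocab)))), fp)  # token -> id
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))
    tokenizer = RobertaTokenizer(vocab_file, merges_file, unk_token="<unk>")
    print(tokenizer.tokenize("lower newer"))
    # expected, per the test above: ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']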
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
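This _LazyModule indirection keeps `import transformers` cheap: symbols listed in the import structure are only resolved when first accessed, and a missing optional backend surfaces at that point rather than at import time. A hedged usage sketch:

# Hedged usage sketch: the torch-backed GroupViTModel symbol is resolved by
# _LazyModule at this import line; a missing torch backend would raise here
# rather than when transformers itself is imported.
from transformers import GroupViTConfig, GroupViTModel

config = GroupViTConfig()      # config classes need no heavy backend
model = GroupViTModel(config)  # randomly initialized model from the default config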
50
1
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = 384 if "tiny" in model_name: lowerCamelCase__ = [3, 3, 9, 3] lowerCamelCase__ = [96, 192, 384, 768] if "small" in model_name: lowerCamelCase__ = [3, 3, 27, 3] lowerCamelCase__ = [96, 192, 384, 768] if "base" in model_name: lowerCamelCase__ = [3, 3, 27, 3] lowerCamelCase__ = [128, 256, 512, 1024] lowerCamelCase__ = 512 if "large" in model_name: lowerCamelCase__ = [3, 3, 27, 3] lowerCamelCase__ = [192, 384, 768, 1536] lowerCamelCase__ = 768 if "xlarge" in model_name: lowerCamelCase__ = [3, 3, 27, 3] lowerCamelCase__ = [256, 512, 1024, 2048] lowerCamelCase__ = 1024 # set label information lowerCamelCase__ = 150 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = ConvNextConfig( depths=__lowerCAmelCase , hidden_sizes=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) lowerCamelCase__ = UperNetConfig( backbone_config=__lowerCAmelCase , auxiliary_in_channels=__lowerCAmelCase , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase , ) return config def A__ ( __lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = [] # fmt: off # stem rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") ) rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: 
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = dct.pop(__lowerCAmelCase ) lowerCamelCase__ = val def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ): lowerCamelCase__ = { """upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""", """upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""", """upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""", """upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""", """upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""", } lowerCamelCase__ = model_name_to_url[model_name] lowerCamelCase__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""state_dict"""] lowerCamelCase__ = get_upernet_config(__lowerCAmelCase ) lowerCamelCase__ = UperNetForSemanticSegmentation(__lowerCAmelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) if "bn" in key: lowerCamelCase__ = key.replace("""bn""" , """batch_norm""" ) lowerCamelCase__ = val # rename keys lowerCamelCase__ = create_rename_keys(__lowerCAmelCase ) for src, dest in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) # verify on image lowerCamelCase__ = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert("""RGB""" ) lowerCamelCase__ = 
SegformerImageProcessor() lowerCamelCase__ = processor(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values with torch.no_grad(): lowerCamelCase__ = model(__lowerCAmelCase ) if model_name == "upernet-convnext-tiny": lowerCamelCase__ = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": lowerCamelCase__ = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": lowerCamelCase__ = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": lowerCamelCase__ = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": lowerCamelCase__ = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCAmelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: print(F'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(F'''openmmlab/{model_name}''' ) processor.push_to_hub(F'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[F'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCamelCase : Any = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
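The argparse block at the end defines the CLI surface, but the converter can equally be driven from Python under the name the main block uses. A hedged sketch (the dump directory is an assumed placeholder, not a path from the original script; arguments follow the positional order of the main block):

# Hypothetical direct invocation of the conversion entry point defined above.
convert_upernet_checkpoint(
    "upernet-convnext-tiny",      # model_name: one of the five supported sizes
    "./upernet-convnext-tiny",    # pytorch_dump_folder_path (placeholder)
    False,                        # push_to_hub
)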
50
'''simple docstring'''


def different_signs(num1: int, num2: int) -> bool:
    # XOR of two ints is negative exactly when their sign bits differ,
    # so this is a branch-free "opposite signs" check.
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
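The original snippet had duplicate parameter names and referenced an undefined variable, so the function above was repaired with distinct arguments; the name different_signs was introduced during that repair and is not from the source. A quick demonstration of the sign-bit trick:

print(different_signs(3, -7))   # True:  signs differ
print(different_signs(-3, -7))  # False: both negative
print(different_signs(3, 7))    # False: both positive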
50
1
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,torchscript=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" ,"""Cant do half precision""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,fpaa=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) # set architectures equal to `None` lowerCamelCase__ = None lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = 
PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" ,"""Can't do half precision""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tinier_bart""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tinier_bart""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,train_memory_csv_file=os.path.join(_lowerCAmelCase ,"""train_mem.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,train_time_csv_file=os.path.join(_lowerCAmelCase ,"""train_time.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""train_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
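Every benchmark case above repeats the same three-step pattern: build PyTorchBenchmarkArguments, wrap them in PyTorchBenchmark, and call run(). A standalone sketch using the same tiny fixture model and sizes as the tests:

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)    # per-model timing, keyed by batch size / seq length
print(results.memory_inference_result)  # per-model memory, same keying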
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : List[Any] = { 'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json', } class UpperCamelCase__ (a ,a ): '''simple docstring''' _UpperCamelCase = 'focalnet' def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=False ,_lowerCAmelCase=[1_92, 3_84, 7_68, 7_68] ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[2, 2, 2, 2] ,_lowerCAmelCase=[3, 3, 3, 3] ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=False ,_lowerCAmelCase=1E-4 ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=32 ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = use_conv_embed lowerCamelCase__ = hidden_sizes lowerCamelCase__ = depths lowerCamelCase__ = focal_levels lowerCamelCase__ = focal_windows lowerCamelCase__ = hidden_act lowerCamelCase__ = mlp_ratio lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = use_layerscale lowerCamelCase__ = layerscale_value lowerCamelCase__ = use_post_layernorm lowerCamelCase__ = use_post_layernorm_in_modulation lowerCamelCase__ = normalize_modulator lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = encoder_stride lowerCamelCase__ = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 ,len(self.depths ) + 1 )] lowerCamelCase__ , lowerCamelCase__ = get_aligned_output_features_output_indices( out_features=_lowerCAmelCase ,out_indices=_lowerCAmelCase ,stage_names=self.stage_names )
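Note that the last two assignments in __init__ derive the backbone bookkeeping (stage_names plus the aligned out_features/out_indices) from the raw hyperparameters rather than storing it. A hedged sketch of instantiating the config with its defaults:

from transformers import FocalNetConfig

config = FocalNetConfig()
print(config.depths)       # [2, 2, 6, 2], the default above
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'],
                           # built from "stem" plus one entry per depth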
50
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # silence TensorFlow's C++ startup logs before it is imported below print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
50
1
'''simple docstring''' from bisect import bisect from itertools import accumulate def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = sorted(zip(__lowerCAmelCase , __lowerCAmelCase ) , key=lambda __lowerCAmelCase : x[0] / x[1] , reverse=__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = [i[0] for i in r], [i[1] for i in r] lowerCamelCase__ = list(accumulate(__lowerCAmelCase ) ) lowerCamelCase__ = bisect(__lowerCAmelCase , __lowerCAmelCase ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
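The record above is the fractional knapsack greedy: sort items by value/weight ratio, use prefix sums of the weights plus `bisect` to find how many items fit whole, then take a fraction of the next item. A de-obfuscated sketch of the same routine (all names here are mine, chosen for readability; the `k == 0` early return mirrors the record, which takes nothing when even the best item does not fit whole):

```python
from bisect import bisect
from itertools import accumulate

def fractional_knapsack(values: list[float], weights: list[float], capacity: float, n: int) -> float:
    """Greedy fractional knapsack: take items by descending value/weight ratio."""
    ranked = sorted(zip(values, weights), key=lambda item: item[0] / item[1], reverse=True)
    vl, wt = [i[0] for i in ranked], [i[1] for i in ranked]
    acc = list(accumulate(wt))   # prefix sums of the sorted weights
    k = bisect(acc, capacity)    # number of items that fit entirely
    if k == 0:
        return 0
    if k == n:
        return sum(vl[:k])
    return sum(vl[:k]) + (capacity - acc[k - 1]) * vl[k] / wt[k]

print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0
```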
50
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
'''simple docstring''' class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = size lowerCamelCase__ = [0] * size lowerCamelCase__ = [0] * size @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): return index | (index + 1) @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): return (index & (index + 1)) - 1 def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = value while index < self.size: lowerCamelCase__ = self.get_prev(_lowerCAmelCase ) + 1 if current_left_border == index: lowerCamelCase__ = value else: lowerCamelCase__ = max(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self.get_next(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): right -= 1 # Because right is exclusive lowerCamelCase__ = 0 while left <= right: lowerCamelCase__ = self.get_prev(_lowerCAmelCase ) if left <= current_left: lowerCamelCase__ = max(_lowerCAmelCase ,self.tree[right] ) lowerCamelCase__ = current_left else: lowerCamelCase__ = max(_lowerCAmelCase ,self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
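The class above is a Fenwick-style (binary indexed) tree specialised for range-maximum queries. Its two static helpers are the standard bit tricks: `index | (index + 1)` is the next node whose interval covers `index`, and `(index & (index + 1)) - 1` is the position just before the block that ends at `index`. A quick standalone demonstration of the two jumps (values chosen only for illustration):

```python
def get_next(index: int) -> int:
    # next node whose interval covers `index`
    return index | (index + 1)

def get_prev(index: int) -> int:
    # one position before the start of the block ending at `index`
    return (index & (index + 1)) - 1

for i in (0, 1, 2, 5, 7):
    print(i, "->", get_next(i), get_prev(i))
# 0 -> 1 -1
# 1 -> 3 -1
# 2 -> 3 1
# 5 -> 7 3
# 7 -> 15 -1
```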
50
'''simple docstring''' from PIL import Image def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ): def brightness(__lowerCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
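`change_brightness` above shifts every channel by `level` via `Image.point`, which evaluates the callback over the 0 to 255 range to build a lookup table, so out-of-range results are clipped rather than wrapping. A self-contained sketch under the same assumptions (file paths are illustrative):

```python
from PIL import Image

def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Shift brightness by `level`; Image.point builds an 8-bit LUT, clipping results."""
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: 128 + level + (c - 128))

with Image.open("image_data/lena.jpg") as img:  # path taken from the record
    change_brightness(img, 100).save("image_data/lena_brightness.png", format="png")
```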
50
1
'''simple docstring''' def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ): lowerCamelCase__ = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def A__ ( ): print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
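The helper above is the closed form for an arithmetic series, S = (n / 2) * (2a + (n - 1)d). A quick cross-check against the explicit sum (my own test values, not part of the record):

```python
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # S = n/2 * (2a + (n - 1) * d)
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)

assert sum_of_series(1, 1, 10) == sum(range(1, 11)) == 55
assert sum_of_series(1, 10, 100) == sum(1 + 10 * i for i in range(100))  # 49600
```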
50
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
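All three counters above rely on the same invariant, asserted by the first function: rows and columns are sorted in decreasing order, so the first negative index can only move left (or stay) as you walk down the rows; that is why the binary-search variant, which shrinks its search bound row by row, wins the benchmark. The classic O(rows + cols) staircase walk uses the invariant even more directly (a sketch, names mine):

```python
def count_negatives(grid: list[list[int]]) -> int:
    """Rows and columns must be sorted in decreasing order, as asserted above."""
    cols = len(grid[0])
    col = cols - 1          # rightmost column index that may still be non-negative
    total = 0
    for row in grid:
        while col >= 0 and row[col] < 0:
            col -= 1        # the boundary only ever moves left as rows decrease
        total += cols - 1 - col
    return total

print(count_negatives([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]))  # 8
```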
50
1
'''simple docstring''' import math UpperCamelCase : Union[str, Any] = 10 UpperCamelCase : Optional[Any] = 7 UpperCamelCase : str = BALLS_PER_COLOUR * NUM_COLOURS def A__ ( __lowerCAmelCase : int = 20 ): lowerCamelCase__ = math.comb(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , __lowerCAmelCase ) lowerCamelCase__ = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
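The snippet above is Project Euler 493: an urn holds 7 colours times 10 balls, 20 balls are drawn, and by linearity of expectation the expected number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)), about 6.818741802. The same computation with explicit names (mine):

```python
import math

NUM_COLOURS = 7
BALLS_PER_COLOUR = 10
NUM_BALLS = NUM_COLOURS * BALLS_PER_COLOUR  # 70

def expected_distinct_colours(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)                       # all possible draws
    missing = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)  # draws avoiding one fixed colour
    result = NUM_COLOURS * (1 - missing / total)              # linearity of expectation
    return f"{result:.9f}"

print(expected_distinct_colours(20))  # 6.818741802
```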
50
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
50
1
'''simple docstring''' def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = round(n ** (1 / 3) ) # round the float cube root: 27 ** (1 / 3) evaluates to 3.0000000000000004, so comparing raw cubes would wrongly report False return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
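Even the rounded float cube root used above can drift for sufficiently large integers (roughly once the cube root itself approaches 10^16, the float error can exceed 0.5). A float-free variant via integer binary search (a sketch, not part of the record):

```python
def perfect_cube_exact(n: int) -> bool:
    """Float-free perfect-cube test using integer binary search."""
    if n < 0:
        return perfect_cube_exact(-n)
    lo, hi = 0, max(1, n)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False

assert perfect_cube_exact(27) and perfect_cube_exact(2**99)  # 2**99 == (2**33) ** 3
assert not perfect_cube_exact(4)
```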
50
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
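`create_token_type_ids_from_sequences` above emits the standard BERT-style segment ids: zeros across `[CLS] A [SEP]`, ones across `B [SEP]`. Spelled out with explicit names (the default `cls`/`sep` ids below are illustrative):

```python
def create_token_type_ids(token_ids_a: list[int], token_ids_b: list[int] | None = None,
                          cls_id: int = 101, sep_id: int = 102) -> list[int]:
    cls, sep = [cls_id], [sep_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep) * [0]
    return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

print(create_token_type_ids([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]
```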
50
1
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ , lowerCamelCase__ = image.size lowerCamelCase__ , lowerCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCamelCase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) lowerCamelCase__ = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0 lowerCamelCase__ = image[None].transpose(0 , 3 , 1 , 2 ) lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ) return 2.0 * image - 1.0 class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): super().__init__() self.register_modules(vqvae=_lowerCAmelCase ,unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) @torch.no_grad() def __call__( self ,_lowerCAmelCase = None ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 1_00 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,): if isinstance(_lowerCAmelCase ,PIL.Image.Image ): lowerCamelCase__ = 1 elif isinstance(_lowerCAmelCase ,torch.Tensor ): lowerCamelCase__ = image.shape[0] else: raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_lowerCAmelCase )}''' ) if isinstance(_lowerCAmelCase ,PIL.Image.Image ): lowerCamelCase__ = preprocess(_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image lowerCamelCase__ = (batch_size, self.unet.config.in_channels // 2, height, width) lowerCamelCase__ = next(self.unet.parameters() ).dtype lowerCamelCase__ = randn_tensor(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase ) lowerCamelCase__ = image.to(device=self.device ,dtype=_lowerCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_lowerCAmelCase ,device=self.device ) lowerCamelCase__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler lowerCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCamelCase__ = {} if accepts_eta: lowerCamelCase__ = eta for t in self.progress_bar(_lowerCAmelCase ): # concat latents and low resolution image in the channel dimension. 
lowerCamelCase__ = torch.cat([latents, image] ,dim=1 ) lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase ) # predict the noise residual lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample # decode the image latents with the VQVAE lowerCamelCase__ = self.vqvae.decode(_lowerCAmelCase ).sample lowerCamelCase__ = torch.clamp(_lowerCAmelCase ,-1.0 ,1.0 ) lowerCamelCase__ = image / 2 + 0.5 lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCAmelCase )
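The denoising loop above is the LDM super-resolution scheme: the low-resolution image is concatenated to the latents on the channel axis at every step (hence `in_channels // 2`), the UNet predicts the noise residual, the scheduler steps back one timestep, and the VQ-VAE decodes the final latents. A hedged usage sketch of this pipeline class via diffusers (the checkpoint id is an assumption; substitute whatever LDM super-resolution weights you use):

```python
import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline  # the pipeline class shown above

# NOTE: checkpoint id assumed for illustration
pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

low_res = Image.open("input.png").convert("RGB").resize((128, 128))
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")
```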
50
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
50
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase : Optional[int] = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): warnings.warn( """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use YolosImageProcessor instead.""" ,_lowerCAmelCase ,) super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
50
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" in checkpoint_url or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2"""
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
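`read_in_q_k_v` above performs the usual timm-to-HF conversion trick: the checkpoint stores one fused `qkv` projection of shape `(3 * hidden, hidden)`, while the HF model wants separate query/key/value tensors, so the fused matrix is sliced into thirds along dim 0. The slicing in isolation (toy hidden size, names mine):

```python
import torch

hidden = 8                                  # toy hidden size
in_proj_weight = torch.randn(3 * hidden, hidden)
in_proj_bias = torch.randn(3 * hidden)

query_w = in_proj_weight[:hidden, :]
key_w   = in_proj_weight[hidden : 2 * hidden, :]
value_w = in_proj_weight[-hidden:, :]
query_b = in_proj_bias[:hidden]
key_b   = in_proj_bias[hidden : 2 * hidden]
value_b = in_proj_bias[-hidden:]

# the three slices exactly tile the fused projection
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)
```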
50
1
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup UpperCamelCase : int = [ 'kernels/rwkv/wkv_cuda.cu', 'kernels/rwkv/wkv_op.cpp', 'kernels/deformable_detr/ms_deform_attn.h', 'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh', 'models/graphormer/algos_graphormer.pyx', ] def A__ ( __lowerCAmelCase : Union[str, Any] ): # Test all the extensions added in the setup for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.') UpperCamelCase : Union[str, Any] = parser.parse_args() if args.check_lib: UpperCamelCase : Optional[int] = importlib.import_module('transformers') UpperCamelCase : int = Path(transformers_module.__file__).parent else: UpperCamelCase : List[str] = Path.cwd() / 'build/lib/transformers' if not test_custom_files_are_present(transformers_path): raise ValueError('The built release does not contain the custom files. Fix this before going further!')
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCamelCase : Dict = logging.get_logger(__name__) class UpperCamelCase__ (enum.Enum ): '''simple docstring''' _UpperCamelCase = 0 _UpperCamelCase = 1 @add_end_docstrings(a ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'generated' def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): lowerCamelCase__ = {} if truncation is not None: lowerCamelCase__ = truncation lowerCamelCase__ = generate_kwargs lowerCamelCase__ = {} if return_tensors is not None and return_type is None: lowerCamelCase__ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: lowerCamelCase__ = return_type if clean_up_tokenization_spaces is not None: lowerCamelCase__ = clean_up_tokenization_spaces if stop_sequence is not None: lowerCamelCase__ = self.tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) if len(_lowerCAmelCase ) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""" ) lowerCamelCase__ = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): return True def UpperCamelCase_ ( self ,*_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.model.config.prefix if self.model.config.prefix is not None else """""" if isinstance(args[0] ,_lowerCAmelCase ): if self.tokenizer.pad_token_id is None: raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" ) lowerCamelCase__ = ([prefix + arg for arg in args[0]],) lowerCamelCase__ = True elif isinstance(args[0] ,_lowerCAmelCase ): lowerCamelCase__ = (prefix + args[0],) lowerCamelCase__ = False else: raise ValueError( F''' `args[0]`: {args[0]} have the wrong format. 
They should be either of type `str` or of type `list`''' ) lowerCamelCase__ = self.tokenizer(*_lowerCAmelCase ,padding=_lowerCAmelCase ,truncation=_lowerCAmelCase ,return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): lowerCamelCase__ = super().__call__(*_lowerCAmelCase ,**_lowerCAmelCase ) if ( isinstance(args[0] ,_lowerCAmelCase ) and all(isinstance(_lowerCAmelCase ,_lowerCAmelCase ) for el in args[0] ) and all(len(_lowerCAmelCase ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE ,**_lowerCAmelCase ): lowerCamelCase__ = self._parse_and_tokenize(_lowerCAmelCase ,truncation=_lowerCAmelCase ,**_lowerCAmelCase ) return inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,**_lowerCAmelCase ): if self.framework == "pt": lowerCamelCase__ , lowerCamelCase__ = model_inputs["""input_ids"""].shape elif self.framework == "tf": lowerCamelCase__ , lowerCamelCase__ = tf.shape(model_inputs["""input_ids"""] ).numpy() lowerCamelCase__ = generate_kwargs.get("""min_length""" ,self.model.config.min_length ) lowerCamelCase__ = generate_kwargs.get("""max_length""" ,self.model.config.max_length ) self.check_inputs(_lowerCAmelCase ,generate_kwargs["""min_length"""] ,generate_kwargs["""max_length"""] ) lowerCamelCase__ = self.model.generate(**_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = output_ids.shape[0] if self.framework == "pt": lowerCamelCase__ = output_ids.reshape(_lowerCAmelCase ,out_b // in_b ,*output_ids.shape[1:] ) elif self.framework == "tf": lowerCamelCase__ = tf.reshape(_lowerCAmelCase ,(in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=ReturnType.TEXT ,_lowerCAmelCase=False ): lowerCamelCase__ = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: lowerCamelCase__ = {F'''{self.return_name}_token_ids''': output_ids} elif return_type == ReturnType.TEXT: lowerCamelCase__ = { F'''{self.return_name}_text''': self.tokenizer.decode( _lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ,clean_up_tokenization_spaces=_lowerCAmelCase ,) } records.append(_lowerCAmelCase ) return records @add_end_docstrings(a ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'summary' def __call__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return super().__call__(*_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if max_length < min_length: logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' ) if input_length < max_length: logger.warning( F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ''' """a summarization task, where outputs shorter than the input are typically wanted, you might """ F'''consider decreasing max_length manually, e.g. 
summarizer(\'...\', max_length={input_length//2})''' ) @add_end_docstrings(a ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'translation' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if input_length > 0.9 * max_length: logger.warning( F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ''' """increasing your max_length manually, e.g. translator('...', max_length=400)""" ) return True def UpperCamelCase_ ( self ,*_lowerCAmelCase ,_lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE ,_lowerCAmelCase=None ,_lowerCAmelCase=None ): if getattr(self.tokenizer ,"""_build_translation_inputs""" ,_lowerCAmelCase ): return self.tokenizer._build_translation_inputs( *_lowerCAmelCase ,return_tensors=self.framework ,truncation=_lowerCAmelCase ,src_lang=_lowerCAmelCase ,tgt_lang=_lowerCAmelCase ) else: return super()._parse_and_tokenize(*_lowerCAmelCase ,truncation=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = super()._sanitize_parameters(**_lowerCAmelCase ) if src_lang is not None: lowerCamelCase__ = src_lang if tgt_lang is not None: lowerCamelCase__ = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. lowerCamelCase__ = kwargs.get("""task""" ,self.task ) lowerCamelCase__ = task.split("""_""" ) if task and len(_lowerCAmelCase ) == 4: # translation, XX, to YY lowerCamelCase__ = items[1] lowerCamelCase__ = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return super().__call__(*_lowerCAmelCase ,**_lowerCAmelCase )
50
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
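The `generate_dummy_inputs` override above appends `num_layers` pairs of zeroed key/value tensors shaped `(batch, num_heads, past_length, hidden // num_heads)` and widens the attention mask by `past_length` columns so the exported ONNX graph sees `past_sequence + sequence`. The shape arithmetic in isolation (toy numbers, mine):

```python
import torch

batch, num_heads, hidden, seqlen = 2, 4, 32, 5
past_length = seqlen + 2                    # the override adds 2, as above
kv_shape = (batch, num_heads, past_length, hidden // num_heads)
past_key_values = [(torch.zeros(kv_shape), torch.zeros(kv_shape)) for _ in range(3)]

mask = torch.ones(batch, seqlen)
mask = torch.cat([mask, torch.ones(batch, past_length, dtype=mask.dtype)], dim=1)
print(mask.shape)  # torch.Size([2, 12]) == seqlen + past_length
```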
50
1
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase ) lowerCamelCase__ = -1 lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) lowerCamelCase__ = model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase__ = TextStreamer(_lowerCAmelCase ) model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase ,streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase__ = cs.out[:-1] self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase ) lowerCamelCase__ = -1 lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) lowerCamelCase__ = model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.decode(greedy_ids[0] ) lowerCamelCase__ = TextIteratorStreamer(_lowerCAmelCase ) lowerCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} lowerCamelCase__ = Thread(target=model.generate ,kwargs=_lowerCAmelCase ) thread.start() lowerCamelCase__ = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase ) lowerCamelCase__ = -1 lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) lowerCamelCase__ = model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase ) lowerCamelCase__ = greedy_ids[:, input_ids.shape[1] :] lowerCamelCase__ = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase__ = TextStreamer(_lowerCAmelCase ,skip_prompt=_lowerCAmelCase ) model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase ,streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase__ = cs.out[:-1] self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCamelCase__ = AutoTokenizer.from_pretrained("""distilgpt2""" ) lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(_lowerCAmelCase ) lowerCamelCase__ = -1 lowerCamelCase__ = torch.ones((1, 5) ,device=_lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCamelCase__ = TextStreamer(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ) model.generate(_lowerCAmelCase ,max_new_tokens=1 ,do_sample=_lowerCAmelCase ,streamer=_lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCamelCase__ = cs.out[:-1] # Remove the final "\n" lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase ) lowerCamelCase__ = -1 lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) lowerCamelCase__ = TextIteratorStreamer(_lowerCAmelCase ,timeout=0.001 ) lowerCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} lowerCamelCase__ = Thread(target=model.generate ,kwargs=_lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCAmelCase ): lowerCamelCase__ = """""" for new_text in streamer: streamer_text += new_text
50
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = '▁' UpperCamelCase : int = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'facebook/mbart-large-50-one-to-many-mmt': ( 'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model' ), } } UpperCamelCase : str = { 'facebook/mbart-large-50-one-to-many-mmt': 10_24, } # fmt: off UpperCamelCase : Dict = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI'] class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = ['input_ids', 'attention_mask'] _UpperCamelCase = [] _UpperCamelCase = [] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs lowerCamelCase__ = kwargs.get("""additional_special_tokens""" ,[] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_lowerCAmelCase ,tgt_lang=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowerCamelCase__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCamelCase__ = 1 lowerCamelCase__ = len(self.sp_model ) lowerCamelCase__ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCAmelCase ) } lowerCamelCase__ = {v: k for k, v in self.lang_code_to_id.items()} lowerCamelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowerCamelCase__ = src_lang if src_lang is not None else """en_XX""" lowerCamelCase__ = self.lang_code_to_id[self._src_lang] lowerCamelCase__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase_ ( self ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCamelCase_ ( self ): return self._src_lang @src_lang.setter def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ): lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCamelCase__ = self.sp_model.PieceToId(_lowerCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not 
os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = [1] * len(self.prefix_tokens ) lowerCamelCase__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) lowerCamelCase__ = src_lang lowerCamelCase__ = self(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = self.convert_tokens_to_ids(_lowerCAmelCase ) lowerCamelCase__ = tgt_lang_id return inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = "en_XX" ,_lowerCAmelCase = None ,_lowerCAmelCase = "ro_RO" ,**_lowerCAmelCase ,): lowerCamelCase__ = src_lang lowerCamelCase__ = tgt_lang return super().prepare_seqaseq_batch(_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ): return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = self.lang_code_to_id[src_lang] lowerCamelCase__ = [self.cur_lang_code_id] lowerCamelCase__ = [self.eos_token_id] def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = self.lang_code_to_id[tgt_lang] lowerCamelCase__ = [self.cur_lang_code_id] lowerCamelCase__ = [self.eos_token_id]
50
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        # cast each tensor to float16 and write it back into the state dict
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
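# Usage sketch (file names here are hypothetical): `convert` halves every tensor
# in a saved state dict, roughly halving the checkpoint size on disk.
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# or, from Python:
#
#   convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")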
50
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring'''
import os
from pathlib import Path


def A__():
    from torch.utils.cpp_extension import load

    # resolve the kernel sources relative to this file
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
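# Usage note (an assumption based on how such loaders are typically wired up):
# this builder is invoked lazily, usually guarded by a try/except so CPU-only
# environments still import cleanly. Compiling ms_deform_attn_cuda.cu requires a
# working CUDA toolchain; the returned MSDA module exposes the fused
# multi-scale deformable attention kernels.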
50
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : Union[str, Any] = { 'vocab_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-german-cased': ( 'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json' ), 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json' ), }, } UpperCamelCase : List[str] = { 'distilbert-base-uncased': 5_12, 'distilbert-base-uncased-distilled-squad': 5_12, 'distilbert-base-cased': 5_12, 'distilbert-base-cased-distilled-squad': 5_12, 'distilbert-base-german-cased': 5_12, 'distilbert-base-multilingual-cased': 5_12, } UpperCamelCase : int = { 'distilbert-base-uncased': {'do_lower_case': True}, 'distilbert-base-uncased-distilled-squad': {'do_lower_case': True}, 'distilbert-base-cased': {'do_lower_case': False}, 'distilbert-base-cased-distilled-squad': {'do_lower_case': False}, 'distilbert-base-german-cased': {'do_lower_case': False}, 'distilbert-base-multilingual-cased': {'do_lower_case': False}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = ['input_ids', 'attention_mask'] _UpperCamelCase = DistilBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
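# Worked example: with the inputs above, the greedy scan keeps activity 0
# (finishes at 2), then 1 (starts at 3 >= 2), skips 2, keeps 3 (5 >= 4) and
# 4 (8 >= 7), so the script prints: 0,1,3,4,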
50
1
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = (DDIMParallelScheduler,) _UpperCamelCase = (('eta', 0.0), ('num_inference_steps', 50)) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """clip_sample""": True, } config.update(**_lowerCAmelCase ) return config def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = 10, 0.0 lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for t in scheduler.timesteps: lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample return sample def UpperCamelCase_ ( self ): for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowerCAmelCase ) lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(steps_offset=1 ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps ,torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def UpperCamelCase_ ( self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCAmelCase ,beta_end=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.check_over_configs(thresholding=_lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,sample_max_value=_lowerCAmelCase ,) def UpperCamelCase_ ( self ): for t in [1, 10, 49]: self.check_over_forward(time_step=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 5_00] ): self.check_over_forward(time_step=_lowerCAmelCase ,num_inference_steps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_lowerCAmelCase ,eta=_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(4_20 ,4_00 ) - 0.1_4771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 ,9_60 ) - 0.3_2460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ,4_86 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ,9_98 ) - 0.02 ) ) < 1E-5 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = 10, 0.0 scheduler.set_timesteps(_lowerCAmelCase ) lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter lowerCamelCase__ = self.dummy_sample_deter + 0.1 lowerCamelCase__ = self.dummy_sample_deter - 0.1 lowerCamelCase__ = samplea.shape[0] lowerCamelCase__ = torch.stack([samplea, samplea, samplea] ,dim=0 ) lowerCamelCase__ = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 ,_lowerCAmelCase ) lowerCamelCase__ = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) ) lowerCamelCase__ = scheduler.batch_step_no_noise(_lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,_lowerCAmelCase ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop() lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.22_3967 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop(prediction_type="""v_prediction""" ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def UpperCamelCase_ ( self ): # We specify different beta, so that the first alpha is 0.99 lowerCamelCase__ = self.full_loop(set_alpha_to_one=_lowerCAmelCase ,beta_start=0.01 ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def UpperCamelCase_ ( self ): # We specify different beta, so that the first alpha is 0.99 lowerCamelCase__ = self.full_loop(set_alpha_to_one=_lowerCAmelCase ,beta_start=0.01 ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
50
'''simple docstring'''
import warnings

from ..trainer import Trainer
from ..utils import logging


UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)


class UpperCamelCase__(Trainer):
    '''simple docstring'''

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
50
1
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed UpperCamelCase : Tuple = 'true' def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=82 , __lowerCAmelCase : Tuple=16 ): set_seed(42 ) lowerCamelCase__ = RegressionModel() lowerCamelCase__ = deepcopy(__lowerCAmelCase ) lowerCamelCase__ = RegressionDataset(length=__lowerCAmelCase ) lowerCamelCase__ = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase ) model.to(accelerator.device ) lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) return model, ddp_model, dataloader def A__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : Optional[Any]=False ): lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" ) lowerCamelCase__ = load_dataset("""glue""" , """mrpc""" , split="""validation""" ) def tokenize_function(__lowerCAmelCase : str ): lowerCamelCase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs with accelerator.main_process_first(): lowerCamelCase__ = dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) lowerCamelCase__ = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__lowerCAmelCase : Dict ): if use_longest: return tokenizer.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" ) return tokenizer.pad(__lowerCAmelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=16 ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = Accelerator(dispatch_batches=__lowerCAmelCase , split_batches=__lowerCAmelCase ) lowerCamelCase__ = get_dataloader(__lowerCAmelCase , not dispatch_batches ) lowerCamelCase__ = AutoModelForSequenceClassification.from_pretrained( """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ): lowerCamelCase__ = [] for batch in dataloader: lowerCamelCase__ , lowerCamelCase__ = batch.values() with torch.no_grad(): lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) lowerCamelCase__ , lowerCamelCase__ = [], [] for logit, targ in logits_and_targets: logits.append(__lowerCAmelCase ) targs.append(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = torch.cat(__lowerCAmelCase ), torch.cat(__lowerCAmelCase ) return logits, targs def A__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : Tuple=82 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : int=16 ): 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_basic_setup(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = generate_predictions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) assert ( len(__lowerCAmelCase ) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowerCAmelCase )}''' def A__ ( __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False ): lowerCamelCase__ = evaluate.load("""glue""" , """mrpc""" ) lowerCamelCase__ , lowerCamelCase__ = get_mrpc_setup(__lowerCAmelCase , __lowerCAmelCase ) # First do baseline lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = setup["""no"""] model.to(__lowerCAmelCase ) model.eval() for batch in dataloader: batch.to(__lowerCAmelCase ) with torch.inference_mode(): lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__lowerCAmelCase , references=batch["""labels"""] ) lowerCamelCase__ = metric.compute() # Then do distributed lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = setup["""ddp"""] model.eval() for batch in dataloader: with torch.inference_mode(): lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs.logits.argmax(dim=-1 ) lowerCamelCase__ = batch["""labels"""] lowerCamelCase__ , lowerCamelCase__ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__lowerCAmelCase , references=__lowerCAmelCase ) lowerCamelCase__ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def A__ ( ): lowerCamelCase__ = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("""**Testing gather_for_metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(__lowerCAmelCase , __lowerCAmelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test torch metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: lowerCamelCase__ = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase ) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(__lowerCAmelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test last batch is not dropped when perfectly divisible**""" ) lowerCamelCase__ = Accelerator() test_torch_metrics(__lowerCAmelCase , 512 ) accelerator.state._reset_state() def A__ ( __lowerCAmelCase : List[Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
50
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
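# Example of the comment-stripping behaviour: a comment-only line reduces to the
# empty string and is filtered out, so these two (made-up) sources hash the same:
#
#   assert _hash_python_lines(["x = 1", "# a comment"]) == _hash_python_lines(["x = 1"])
#
# Inline comments, by contrast, can leave trailing whitespace behind and so can
# still change the hash.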
50
1
'''simple docstring'''
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    # base case: no items left to consider
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # option 1: skip the current item
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # option 2: take the current item, if it fits
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
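# Minimal usage sketch (made-up weights/values): with capacity 5 the best subset
# is the items of weight 1 and 4 (values 5 + 8), so the call returns 13.
#
#   assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13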
50
'''simple docstring'''
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # extract a strand: a run of elements that are already in order
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
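# Trace of the first pass on [4, 3, 5, 1, 2] (ascending order): the strand
# [4, 5] is pulled out and becomes the initial solution; the recursion then
# processes the remaining [3, 1, 2], merging each new strand into `solution`.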
50
1
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase : str = logging.get_logger(__name__) UpperCamelCase : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } UpperCamelCase : List[Any] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } UpperCamelCase : int = {'facebook/blenderbot-3B': 1_28} class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] _UpperCamelCase = BlenderbotTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase="replace" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,errors=_lowerCAmelCase ,bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,trim_offsets=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,_lowerCAmelCase ) != add_prefix_space: lowerCamelCase__ = getattr(_lowerCAmelCase ,pre_tok_state.pop("""type""" ) ) lowerCamelCase__ = add_prefix_space lowerCamelCase__ = pre_tok_class(**_lowerCAmelCase ) lowerCamelCase__ = add_prefix_space lowerCamelCase__ = """post_processor""" lowerCamelCase__ = getattr(self.backend_tokenizer ,_lowerCAmelCase ,_lowerCAmelCase ) if tokenizer_component_instance: lowerCamelCase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCamelCase__ = tuple(state["""sep"""] ) if "cls" in state: lowerCamelCase__ = tuple(state["""cls"""] ) lowerCamelCase__ = False if state.get("""add_prefix_space""" ,_lowerCAmelCase ) != add_prefix_space: lowerCamelCase__ = add_prefix_space lowerCamelCase__ = True if state.get("""trim_offsets""" ,_lowerCAmelCase ) != trim_offsets: lowerCamelCase__ = trim_offsets lowerCamelCase__ = True if changes_to_apply: lowerCamelCase__ = getattr(_lowerCAmelCase ,state.pop("""type""" ) ) lowerCamelCase__ = component_class(**_lowerCAmelCase ) setattr(self.backend_tokenizer ,_lowerCAmelCase ,_lowerCAmelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, 
RoBERTa->Blenderbot def UpperCamelCase_ ( self ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else value lowerCamelCase__ = value def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): lowerCamelCase__ = kwargs.get("""is_split_into_words""" ,_lowerCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): lowerCamelCase__ = kwargs.get("""is_split_into_words""" ,_lowerCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): return token_ids_a + [self.eos_token_id] def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text ) else: # Generated responses should contain them already. inputs.append(_lowerCAmelCase ) lowerCamelCase__ = """ """.join(_lowerCAmelCase ) lowerCamelCase__ = self.encode(_lowerCAmelCase ) if len(_lowerCAmelCase ) > self.model_max_length: lowerCamelCase__ = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
50
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
50
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase : Any = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Union[str, Any] = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
50
1
'''simple docstring''' from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def A__ ( __lowerCAmelCase : Optional[int] ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class UpperCamelCase__ (a ): '''simple docstring''' @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = parser.add_parser("""download""" ) download_parser.add_argument( """--cache-dir""" ,type=_lowerCAmelCase ,default=_lowerCAmelCase ,help="""Path to location to store the models""" ) download_parser.add_argument( """--force""" ,action="""store_true""" ,help="""Force the model to be downloaded even if it is already in cache-dir""" ) download_parser.add_argument( """--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,) download_parser.add_argument("""model""" ,type=_lowerCAmelCase ,help="""Name of the model to download""" ) download_parser.set_defaults(func=_lowerCAmelCase ) def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model lowerCamelCase__ = cache lowerCamelCase__ = force lowerCamelCase__ = trust_remote_code def UpperCamelCase_ ( self ): from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
50
'''simple docstring''' def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ): return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
50
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'cvt' def __init__( self ,_lowerCAmelCase=3 ,_lowerCAmelCase=[7, 3, 3] ,_lowerCAmelCase=[4, 2, 2] ,_lowerCAmelCase=[2, 1, 1] ,_lowerCAmelCase=[64, 1_92, 3_84] ,_lowerCAmelCase=[1, 3, 6] ,_lowerCAmelCase=[1, 2, 10] ,_lowerCAmelCase=[4.0, 4.0, 4.0] ,_lowerCAmelCase=[0.0, 0.0, 0.0] ,_lowerCAmelCase=[0.0, 0.0, 0.0] ,_lowerCAmelCase=[0.0, 0.0, 0.1] ,_lowerCAmelCase=[True, True, True] ,_lowerCAmelCase=[False, False, True] ,_lowerCAmelCase=["dw_bn", "dw_bn", "dw_bn"] ,_lowerCAmelCase=[3, 3, 3] ,_lowerCAmelCase=[1, 1, 1] ,_lowerCAmelCase=[2, 2, 2] ,_lowerCAmelCase=[1, 1, 1] ,_lowerCAmelCase=[1, 1, 1] ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = num_channels lowerCamelCase__ = patch_sizes lowerCamelCase__ = patch_stride lowerCamelCase__ = patch_padding lowerCamelCase__ = embed_dim lowerCamelCase__ = num_heads lowerCamelCase__ = depth lowerCamelCase__ = mlp_ratio lowerCamelCase__ = attention_drop_rate lowerCamelCase__ = drop_rate lowerCamelCase__ = drop_path_rate lowerCamelCase__ = qkv_bias lowerCamelCase__ = cls_token lowerCamelCase__ = qkv_projection_method lowerCamelCase__ = kernel_qkv lowerCamelCase__ = padding_kv lowerCamelCase__ = stride_kv lowerCamelCase__ = padding_q lowerCamelCase__ = stride_q lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' import os from pathlib import Path def A__ ( ): from torch.utils.cpp_extension import load lowerCamelCase__ = Path(__lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" lowerCamelCase__ = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , __lowerCAmelCase , with_cuda=__lowerCAmelCase , extra_include_paths=[str(__lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
50
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase : int = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
50
1
'''simple docstring''' from __future__ import annotations UpperCamelCase : Optional[int] = 10 def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 1 lowerCamelCase__ = max(__lowerCAmelCase ) while placement <= max_digit: # declare and initialize empty buckets lowerCamelCase__ = [[] for _ in range(__lowerCAmelCase )] # split list_of_ints between the buckets for i in list_of_ints: lowerCamelCase__ = int((i / placement) % RADIX ) buckets[tmp].append(__lowerCAmelCase ) # put each bucket's contents into list_of_ints lowerCamelCase__ = 0 for b in range(__lowerCAmelCase ): for i in buckets[b]: lowerCamelCase__ = i a += 1 # move to the next digit place placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
50
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer UpperCamelCase : str = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = {'vocab_file': 'vocab.txt'} UpperCamelCase : int = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } UpperCamelCase : Tuple = { 'YituTech/conv-bert-base': 5_12, 'YituTech/conv-bert-medium-small': 5_12, 'YituTech/conv-bert-small': 5_12, } UpperCamelCase : Dict = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ConvBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
'''simple docstring''' from PIL import Image def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ): def brightness(__lowerCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
50
1
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(__lowerCAmelCase , torch.Tensor ): raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" ) lowerCamelCase__ = v.half() if save_path is None: # overwrite src_path lowerCamelCase__ = src_path torch.save(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": fire.Fire(convert)
50
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
50
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCAmelCase ,"""width_multiplier""" ) ) class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=64 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase="swish" ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=10 ,_lowerCAmelCase=None ,_lowerCAmelCase=0.25 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = make_divisible(5_12 * width_multiplier ,divisor=8 ) lowerCamelCase__ = hidden_act lowerCamelCase__ = conv_kernel_size lowerCamelCase__ = output_stride lowerCamelCase__ = classifier_dropout_prob lowerCamelCase__ = use_labels lowerCamelCase__ = is_training lowerCamelCase__ = num_labels lowerCamelCase__ = initializer_range lowerCamelCase__ = scope lowerCamelCase__ = width_multiplier lowerCamelCase__ = ffn_dropout lowerCamelCase__ = attn_dropout def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): return MobileViTVaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = MobileViTVaModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): 
lowerCamelCase__ = self.num_labels lowerCamelCase__ = MobileViTVaForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = MobileViTVaForSemanticSegmentation(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) _UpperCamelCase = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTVaModelTester(self ) lowerCamelCase__ = MobileViTVaConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def UpperCamelCase_ ( self ): pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): def check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): 
lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.hidden_states lowerCamelCase__ = 5 self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ = 2 for i in range(len(_lowerCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,) divisor *= 2 self.assertEqual(self.model_tester.output_stride ,divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = MobileViTVaModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( _lowerCAmelCase ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ = model.to(_lowerCAmelCase ) lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) lowerCamelCase__ = outputs.logits # verify the logits lowerCamelCase__ = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 
6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] ,device=_lowerCAmelCase ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_lowerCAmelCase ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ = model.to(_lowerCAmelCase ) lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) lowerCamelCase__ = outputs.logits.detach().cpu() lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ,target_sizes=[(50, 60)] ) lowerCamelCase__ = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape ,_lowerCAmelCase ) lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ) lowerCamelCase__ = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape ,_lowerCAmelCase )
50
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
50
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase : Dict = { 'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'], 'configuration_maskformer_swin': ['MaskFormerSwinConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = ['MaskFormerFeatureExtractor'] UpperCamelCase : Tuple = ['MaskFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'MaskFormerForInstanceSegmentation', 'MaskFormerModel', 'MaskFormerPreTrainedModel', ] UpperCamelCase : Tuple = [ 'MaskFormerSwinBackbone', 'MaskFormerSwinModel', 'MaskFormerSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
50
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
1
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = DebertaTokenizer _UpperCamelCase = True _UpperCamelCase = DebertaTokenizerFast def UpperCamelCase_ ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """[UNK]""", ] lowerCamelCase__ = dict(zip(_lowerCAmelCase ,range(len(_lowerCAmelCase ) ) ) ) lowerCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCamelCase__ = {"""unk_token""": """[UNK]"""} lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_lowerCAmelCase ) ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """lower newer""" lowerCamelCase__ = """lower newer""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = """lower newer""" lowerCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowerCamelCase__ = tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = tokens + [tokenizer.unk_token] lowerCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = tokenizer("""Hello""" ,"""World""" ) lowerCamelCase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["""token_type_ids"""] ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode( """sequence builders""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode( """sequence builders""" ,"""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def UpperCamelCase_ ( self ): 
lowerCamelCase__ = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: lowerCamelCase__ = tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) lowerCamelCase__ = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] lowerCamelCase__ = tokenizer(_lowerCAmelCase ,padding=_lowerCAmelCase ) lowerCamelCase__ = [tokenizer.decode(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ) for seq in encoding["""input_ids"""]] # fmt: off lowerCamelCase__ = { """input_ids""": [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], """token_type_ids""": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on lowerCamelCase__ = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] self.assertDictEqual(encoding.data ,_lowerCAmelCase ) for expected, decoded in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )
50
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # a word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word. 
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file to process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save the result', ) UpperCamelCase : Any = parser.parse_args() main(args)
50
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase : str = logging.get_logger(__name__) UpperCamelCase : Any = { 'facebook/data2vec-vision-base-ft': ( 'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json' ), } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'data2vec-vision' def __init__( self ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=16 ,_lowerCAmelCase=3 ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=True ,_lowerCAmelCase=[3, 5, 7, 11] ,_lowerCAmelCase=[1, 2, 3, 6] ,_lowerCAmelCase=True ,_lowerCAmelCase=0.4 ,_lowerCAmelCase=2_56 ,_lowerCAmelCase=1 ,_lowerCAmelCase=False ,_lowerCAmelCase=2_55 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = use_mask_token lowerCamelCase__ = use_absolute_position_embeddings lowerCamelCase__ = use_relative_position_bias lowerCamelCase__ = use_shared_relative_position_bias lowerCamelCase__ = layer_scale_init_value lowerCamelCase__ = drop_path_rate lowerCamelCase__ = use_mean_pooling # decode head attributes (semantic segmentation) lowerCamelCase__ = out_indices lowerCamelCase__ = pool_scales # auxiliary head attributes (semantic segmentation) lowerCamelCase__ = use_auxiliary_head lowerCamelCase__ = auxiliary_loss_weight lowerCamelCase__ = auxiliary_channels lowerCamelCase__ = auxiliary_num_convs lowerCamelCase__ = auxiliary_concat_input lowerCamelCase__ = semantic_loss_ignore_index class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = version.parse('1.11' ) @property def UpperCamelCase_ ( self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCamelCase_ ( self ): return 1E-4
50
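As a quick sanity check of the defaults declared above, the config can be instantiated directly (class name as exported by transformers):

from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()
assert (config.hidden_size, config.image_size, config.patch_size) == (768, 224, 16)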
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" 
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
50
1
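The read_in_q_k_v step above slices a fused timm-style qkv projection into separate query/key/value weights. A toy-sized sketch of that slicing, with hidden_size shrunk to 4 for illustration:

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]

# the three slices tile the fused matrix exactly, in q/k/v order
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)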
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=4 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = num_choices lowerCamelCase__ = scope def UpperCamelCase_ ( self ): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_choices ) lowerCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self ): return OpenLlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,use_stable_embedding=_lowerCAmelCase ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = OpenLlamaModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) 
model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): lowerCamelCase__ = True lowerCamelCase__ = OpenLlamaModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model( _lowerCAmelCase ,attention_mask=_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ,encoder_attention_mask=_lowerCAmelCase ,) lowerCamelCase__ = model( _lowerCAmelCase ,attention_mask=_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ,) lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): lowerCamelCase__ = OpenLlamaForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = OpenLlamaForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # first forward pass lowerCamelCase__ = model( _lowerCAmelCase ,attention_mask=_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ,encoder_attention_mask=_lowerCAmelCase ,use_cache=_lowerCAmelCase ,) lowerCamelCase__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowerCamelCase__ = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowerCamelCase__ = torch.cat([input_mask, next_mask] ,dim=-1 ) lowerCamelCase__ = model( _lowerCAmelCase ,attention_mask=_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ,encoder_attention_mask=_lowerCAmelCase ,output_hidden_states=_lowerCAmelCase ,)["""hidden_states"""][0] lowerCamelCase__ = model( _lowerCAmelCase ,attention_mask=_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ,encoder_attention_mask=_lowerCAmelCase ,past_key_values=_lowerCAmelCase ,output_hidden_states=_lowerCAmelCase ,)["""hidden_states"""][0] # select random slice lowerCamelCase__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( 
lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _UpperCamelCase = (OpenLlamaForCausalLM,) if is_torch_available() else () _UpperCamelCase = ( { 'feature-extraction': OpenLlamaModel, 'text-classification': OpenLlamaForSequenceClassification, 'text-generation': OpenLlamaForCausalLM, 'zero-shot': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = OpenLlamaModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = 3 lowerCamelCase__ = input_dict["""input_ids"""] lowerCamelCase__ = input_ids.ne(1 ).to(_lowerCAmelCase ) lowerCamelCase__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowerCamelCase__ = OpenLlamaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = 3 lowerCamelCase__ = """single_label_classification""" lowerCamelCase__ = input_dict["""input_ids"""] lowerCamelCase__ = input_ids.ne(1 ).to(_lowerCAmelCase ) lowerCamelCase__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowerCamelCase__ = OpenLlamaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = 3 lowerCamelCase__ = """multi_label_classification""" lowerCamelCase__ = input_dict["""input_ids"""] lowerCamelCase__ = input_ids.ne(1 ).to(_lowerCAmelCase ) lowerCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCamelCase__ = OpenLlamaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) 
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def UpperCamelCase_ ( self ): pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = ids_tensor([1, 10] ,config.vocab_size ) lowerCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCamelCase__ = OpenLlamaModel(_lowerCAmelCase ) original_model.to(_lowerCAmelCase ) original_model.eval() lowerCamelCase__ = original_model(_lowerCAmelCase ).last_hidden_state lowerCamelCase__ = original_model(_lowerCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCamelCase__ = {"""type""": scaling_type, """factor""": 10.0} lowerCamelCase__ = OpenLlamaModel(_lowerCAmelCase ) scaled_model.to(_lowerCAmelCase ) scaled_model.eval() lowerCamelCase__ = scaled_model(_lowerCAmelCase ).last_hidden_state lowerCamelCase__ = scaled_model(_lowerCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-5 ) )
50
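The parameterized test above exercises RoPE scaling. As a hedged sketch of what it configures: scaling is set via a rope_scaling dict on the config, where "linear" rescales all positions and "dynamic" (NTK-aware) leaves inputs shorter than max_position_embeddings unchanged, which is exactly what the allclose/assertFalse branches check:

from transformers import OpenLlamaConfig

config = OpenLlamaConfig()
config.rope_scaling = {"type": "dynamic", "factor": 10.0}  # or {"type": "linear", "factor": 10.0}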
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
50
1
'''simple docstring'''
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(F'{solution() = }')
50
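Worked examples for the two converters above (Project Euler 89 measures how many characters the round trip saves):

assert parse_roman_numerals("XIV") == 14
assert parse_roman_numerals("MCCCCCCVI") == 1606  # poorly formed input still parses
assert generate_roman_numerals(1994) == "MCMXCIV"
assert generate_roman_numerals(1606) == "MDCVI"  # 4 characters shorter than MCCCCCCVI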
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
50
1
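For reference, the dummy past_key_values built above carry one (key, value) pair per layer, each of shape (batch, num_heads, past_seq_len, head_dim) with head_dim = n_embd // n_head. A hedged sketch using the default sizes from the config above (past length is seqlen + 2, as in the code):

import torch

batch, seqlen = 2, 8
num_heads, head_dim, n_layer = 16, 4096 // 16, 28  # defaults declared above
past_shape = (batch, num_heads, seqlen + 2, head_dim)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]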
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
50
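Usage of the DP above (names per the definition above; both time and space are O(len(arr) * required_sum)):

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True  # 4 + 5
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False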
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
50
1
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
50
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
50
1
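The converter above roughly halves the on-disk size of a saved state dict. A usage sketch (file and script names are placeholders):

convert("pytorch_model.bin", map_location="cpu", save_path="pytorch_model.fp16.bin")
# or via the fire CLI entry point, assuming the file is saved as convert_model_to_fp16.py:
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin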
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = np.full((len(__lowerCAmelCase ), sequence_length, 2) , __lowerCAmelCase ) else: lowerCamelCase__ = np.full((len(__lowerCAmelCase ), sequence_length) , __lowerCAmelCase ) for i, tensor in enumerate(__lowerCAmelCase ): if padding_side == "right": if isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = tensor[:sequence_length] else: lowerCamelCase__ = tensor[:sequence_length] else: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = tensor[:sequence_length] else: lowerCamelCase__ = tensor[:sequence_length] return out_tensor.tolist() def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = ord(__lowerCAmelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True lowerCamelCase__ = unicodedata.category(__lowerCAmelCase ) if cat.startswith("""P""" ): return True return False @dataclass class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = True _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = -100 _UpperCamelCase = "pt" def UpperCamelCase_ ( self ,_lowerCAmelCase ): import torch lowerCamelCase__ = """label""" if """label""" in features[0].keys() else """labels""" lowerCamelCase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowerCamelCase__ = self.tokenizer.pad( _lowerCAmelCase ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" if labels is None else None ,) if labels is None: return batch lowerCamelCase__ = torch.tensor(batch["""entity_ids"""] ).shape[1] lowerCamelCase__ = self.tokenizer.padding_side if padding_side == "right": lowerCamelCase__ = [ list(_lowerCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(_lowerCAmelCase )) for label in labels ] else: lowerCamelCase__ = [ [self.label_pad_token_id] * (sequence_length - len(_lowerCAmelCase )) + list(_lowerCAmelCase ) for label in labels ] lowerCamelCase__ = [feature["""ner_tags"""] for feature in features] lowerCamelCase__ = padding_tensor(_lowerCAmelCase ,-1 ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = [feature["""original_entity_spans"""] for feature in features] lowerCamelCase__ = padding_tensor(_lowerCAmelCase ,(-1, -1) ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = {k: torch.tensor(_lowerCAmelCase ,dtype=torch.intaa ) for k, v in batch.items()} return batch
50
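A standalone sketch of what the 1-D branch of the padding helper above is meant to do: right- or left-pad each sequence to sequence_length with padding_value (the 2-D branch does the same over (sequence_length, 2) spans):

import numpy as np


def pad_sequences(tensors, padding_value, padding_side, sequence_length):
    out = np.full((len(tensors), sequence_length), padding_value)
    for i, t in enumerate(tensors):
        t = t[:sequence_length]  # truncate overly long sequences
        if padding_side == "right":
            out[i, : len(t)] = t
        else:
            out[i, sequence_length - len(t) :] = t
    return out.tolist()


assert pad_sequences([[1, 2], [3]], -100, "right", 3) == [[1, 2, -100], [3, -100, -100]]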
'''simple docstring'''
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
50
1
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # keep TensorFlow's C++ logging quiet

print('Python version:', sys.version)
print('transformers version:', transformers.__version__)

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)

try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)

try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
50
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
50
1
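The greedy pass above assumes the activities are already sorted by finish time; with the sample arrays it selects activities 0, 1, 3 and 4:

print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])
# The following activities are selected:
# 0,1,3,4,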
'''simple docstring''' class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = name lowerCamelCase__ = val def __str__( self ): return F'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,_lowerCAmelCase ): return self.val < other.val class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = {} lowerCamelCase__ = {} lowerCamelCase__ = self.build_heap(_lowerCAmelCase ) def __getitem__( self ,_lowerCAmelCase ): return self.get_value(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return (idx - 1) // 2 def UpperCamelCase_ ( self ,_lowerCAmelCase ): return idx * 2 + 1 def UpperCamelCase_ ( self ,_lowerCAmelCase ): return idx * 2 + 2 def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.heap_dict[key] def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = len(_lowerCAmelCase ) - 1 lowerCamelCase__ = self.get_parent_idx(_lowerCAmelCase ) for idx, i in enumerate(_lowerCAmelCase ): lowerCamelCase__ = idx lowerCamelCase__ = i.val for i in range(_lowerCAmelCase ,-1 ,-1 ): self.sift_down(_lowerCAmelCase ,_lowerCAmelCase ) return array def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): while True: lowerCamelCase__ = self.get_left_child_idx(_lowerCAmelCase ) # noqa: E741 lowerCamelCase__ = self.get_right_child_idx(_lowerCAmelCase ) lowerCamelCase__ = idx if l < len(_lowerCAmelCase ) and array[l] < array[idx]: lowerCamelCase__ = l if r < len(_lowerCAmelCase ) and array[r] < array[smallest]: lowerCamelCase__ = r if smallest != idx: lowerCamelCase__ , lowerCamelCase__ = array[smallest], array[idx] ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowerCamelCase__ = smallest else: break def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = self.get_parent_idx(_lowerCAmelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowerCamelCase__ , lowerCamelCase__ = self.heap[idx], self.heap[p] lowerCamelCase__ , lowerCamelCase__ = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowerCamelCase__ = p lowerCamelCase__ = self.get_parent_idx(_lowerCAmelCase ) def UpperCamelCase_ ( self ): return self.heap[0] def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.heap[-1], self.heap[0] lowerCamelCase__ , lowerCamelCase__ = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowerCamelCase__ = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def UpperCamelCase_ ( self ,_lowerCAmelCase ): self.heap.append(_lowerCAmelCase ) lowerCamelCase__ = len(self.heap ) - 1 lowerCamelCase__ = node.val self.sift_up(len(self.heap ) - 1 ) def UpperCamelCase_ ( self ): return len(self.heap ) == 0 def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowerCamelCase__ = new_value lowerCamelCase__ = new_value self.sift_up(self.idx_of_element[node] ) UpperCamelCase : str = Node('R', -1) UpperCamelCase : List[Any] = Node('B', 6) UpperCamelCase : Any = Node('A', 3) UpperCamelCase : List[Any] = Node('X', 1) UpperCamelCase : Union[str, Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array UpperCamelCase : Any = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # 
myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> -17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
50
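A hedged usage sketch of the heap above, assuming the conventional class and method names (Node, MinHeap, peek, remove, decrease_key) of the implementation this code was derived from:

r, b, a = Node("R", -1), Node("B", 6), Node("A", 3)
heap = MinHeap([r, b, a])
assert heap.peek() is r    # the smallest val sits at the root
heap.decrease_key(b, -17)  # b now outranks r
assert heap.remove() is b  # pops the new minimum and re-sifts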
'''simple docstring'''
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
50
1
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase : Tuple = 16 UpperCamelCase : List[Any] = 32 def A__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ): lowerCamelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCamelCase__ = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__lowerCAmelCase : str ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase__ = datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase__ = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__lowerCAmelCase : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase__ = 16 elif accelerator.mixed_precision != "no": lowerCamelCase__ = 8 else: lowerCamelCase__ = None return tokenizer.pad( __lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCamelCase__ = DataLoader( tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) lowerCamelCase__ = DataLoader( tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCAmelCase ) == "1": lowerCamelCase__ = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowerCamelCase__ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: lowerCamelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase__ = config["""lr"""] lowerCamelCase__ = int(config["""num_epochs"""] ) lowerCamelCase__ = int(config["""seed"""] ) lowerCamelCase__ = int(config["""batch_size"""] ) set_seed(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation lowerCamelCase__ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCamelCase__ = batch_size // MAX_GPU_BATCH_SIZE lowerCamelCase__ = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase__ = model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase__ = AdamW(params=model.parameters() , lr=__lowerCAmelCase ) # Instantiate scheduler lowerCamelCase__ = get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: lowerCamelCase__ = os.path.split(__lowerCAmelCase )[-1].split(""".""" )[0] accelerator.init_trackers(__lowerCAmelCase , __lowerCAmelCase ) # Now we train the model for epoch in range(__lowerCAmelCase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowerCamelCase__ = 0 for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowerCamelCase__ = loss / gradient_accumulation_steps accelerator.backward(__lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs.logits.argmax(dim=-1 ) lowerCamelCase__ , lowerCamelCase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__lowerCAmelCase , references=__lowerCAmelCase , ) lowerCamelCase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { """accuracy""": eval_metric["""accuracy"""], """f1""": eval_metric["""f1"""], """train_loss""": total_loss.item() / len(__lowerCAmelCase ), """epoch""": epoch, } , step=__lowerCAmelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def A__ ( ): lowerCamelCase__ = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__lowerCAmelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
50
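A minimal sketch of the experiment-tracking calls exercised by the training script above, assuming at least one tracker (e.g. TensorBoard) is discoverable in the environment; the project name and config values here are illustrative, not taken from the source.

from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
accelerator.init_trackers("tracking_example", config)  # hypothetical run name
for epoch in range(config["num_epochs"]):
    total_loss = 0.0  # accumulate real batch losses here
    accelerator.log({"train_loss": total_loss, "epoch": epoch}, step=epoch)
accelerator.end_training()  # closes all open trackers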
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = [] for line in lines: lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase ) # remove comments if line: filtered_lines.append(__lowerCAmelCase ) lowerCamelCase__ = """\n""".join(__lowerCAmelCase ) # Make a hash from all this code lowerCamelCase__ = full_str.encode("""utf-8""" ) return shaaaa(__lowerCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCamelCase : Dict = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase : str = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCamelCase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
50
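The hashing helper above fingerprints a packaged module by stripping comments and blank lines before digesting the remaining source; the same idea as a stand-alone sketch, with the hash function spelled out as sha256:

import re
from hashlib import sha256

def hash_python_lines(lines):
    filtered = [re.sub(r"#.*", "", line) for line in lines]  # drop comments
    full_str = "\n".join(line for line in filtered if line)  # drop empty lines
    return sha256(full_str.encode("utf-8")).hexdigest()

print(hash_python_lines(["x = 1  # set x", "", "y = x + 1"]))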
1
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = (UnCLIPScheduler,) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = { """num_train_timesteps""": 10_00, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_lowerCAmelCase ) return config def UpperCamelCase_ ( self ): for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for time_step in [0, 5_00, 9_99]: for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_lowerCAmelCase ,prev_timestep=_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(variance_type="""fixed_small_log""" ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(variance_type="""learned_range""" ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = 0.5 assert scheduler._get_variance(1 ,predicted_variance=_lowerCAmelCase ) - -10.171_2790 < 1E-5 assert scheduler._get_variance(4_87 ,predicted_variance=_lowerCAmelCase ) - -5.799_8052 < 1E-5 assert scheduler._get_variance(9_99 ,predicted_variance=_lowerCAmelCase ) - -0.001_0011 < 1E-5 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = scheduler.timesteps lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter lowerCamelCase__ = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) # 2. 
predict previous mean of sample x_t-1 lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase ).prev_sample lowerCamelCase__ = pred_prev_sample lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1E-2 assert abs(result_mean.item() - 0.328_4743 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(25 ) lowerCamelCase__ = scheduler.timesteps lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter lowerCamelCase__ = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) if i + 1 == timesteps.shape[0]: lowerCamelCase__ = None else: lowerCamelCase__ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowerCamelCase__ = scheduler.step( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,prev_timestep=_lowerCAmelCase ,generator=_lowerCAmelCase ).prev_sample lowerCamelCase__ = pred_prev_sample lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1E-2 assert abs(result_mean.item() - 0.336_2038 ) < 1E-3 def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): pass
50
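The scheduler tests above follow the usual diffusers sampling loop: predict a residual, call step(), and continue from prev_sample. A toy version of that loop, with random tensors standing in for a real denoising model (all shapes and config values are illustrative):

import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32)  # stand-in for an initial noisy sample
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample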
'''simple docstring''' import operator def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ): lowerCamelCase__ = operator.lt if reverse else operator.gt lowerCamelCase__ = solution or [] if not arr: return solution lowerCamelCase__ = [arr.pop(0 )] for i, item in enumerate(__lowerCAmelCase ): if _operator(__lowerCAmelCase , sublist[-1] ): sublist.append(__lowerCAmelCase ) arr.pop(__lowerCAmelCase ) # merging sublist into solution list if not solution: solution.extend(__lowerCAmelCase ) else: while sublist: lowerCamelCase__ = sublist.pop(0 ) for i, xx in enumerate(__lowerCAmelCase ): if not _operator(__lowerCAmelCase , __lowerCAmelCase ): solution.insert(__lowerCAmelCase , __lowerCAmelCase ) break else: solution.append(__lowerCAmelCase ) strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
50
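Strand sort repeatedly peels an increasing "strand" off the front of the input and merges it into the running solution. A self-contained restatement, using heapq.merge for the merge step as a deliberate simplification of the insertion loop above:

import heapq
import operator

def strand_sort_demo(arr, reverse=False):
    grows = operator.lt if reverse else operator.gt
    solution = []
    while arr:
        strand = [arr.pop(0)]
        i = 0
        while i < len(arr):
            if grows(arr[i], strand[-1]):
                strand.append(arr.pop(i))  # element extends the strand
            else:
                i += 1
        solution = list(heapq.merge(solution, strand, reverse=reverse))
    return solution

print(strand_sort_demo([4, 3, 5, 1, 2]))                # [1, 2, 3, 4, 5]
print(strand_sort_demo([4, 3, 5, 1, 2], reverse=True))  # [5, 4, 3, 2, 1]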
1
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 ,_lowerCAmelCase=3 ,_lowerCAmelCase=10 ,_lowerCAmelCase=[8, 16, 32, 64] ,_lowerCAmelCase=[1, 1, 2, 1] ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="relu" ,_lowerCAmelCase=3 ,_lowerCAmelCase=None ,_lowerCAmelCase=["stage2", "stage3", "stage4"] ,_lowerCAmelCase=[2, 3, 4] ,_lowerCAmelCase=1 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = num_channels lowerCamelCase__ = embeddings_size lowerCamelCase__ = hidden_sizes lowerCamelCase__ = depths lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_act lowerCamelCase__ = num_labels lowerCamelCase__ = scope lowerCamelCase__ = len(_lowerCAmelCase ) lowerCamelCase__ = out_features lowerCamelCase__ = out_indices lowerCamelCase__ = num_groups def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_labels ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return BitConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = BitModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = BitForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = BitBackbone(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCamelCase__ = None lowerCamelCase__ = BitBackbone(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _UpperCamelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = BitModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self ): return @unittest.skip(reason="""Bit does not output attentions""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(config=_lowerCAmelCase ) for name, module in model.named_modules(): if isinstance(_lowerCAmelCase ,(nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) ,msg=F'''Parameter {name} of model 
{model_class} seems not properly initialized''' ,) self.assertTrue( torch.all(module.bias == 0 ) ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) def UpperCamelCase_ ( self ): def check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ = self.model_tester.num_stages self.assertEqual(len(_lowerCAmelCase ) ,expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase__ = layer_type lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = BitModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCAmelCase ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) ) @require_torch class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (BitBackbone,) if is_torch_available() else () _UpperCamelCase = BitConfig _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = BitModelTester(self )
50
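The backbone test above pins down the contract: one feature map per entry in out_features, with channel counts taken from config.hidden_sizes. A randomly initialised sketch of that usage (no pretrained weights; sizes illustrative):

import torch
from transformers import BitBackbone, BitConfig

config = BitConfig(out_features=["stage2", "stage4"])
backbone = BitBackbone(config)
backbone.eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 224, 224))
for feature_map in outputs.feature_maps:  # one map per requested stage
    print(feature_map.shape)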
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A__ ( __lowerCAmelCase : dict ): return (data["data"], data["target"]) def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ): lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(__lowerCAmelCase , __lowerCAmelCase ) # Predict target for test data lowerCamelCase__ = xgb.predict(__lowerCAmelCase ) lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 ) return predictions def A__ ( ): lowerCamelCase__ = fetch_california_housing() lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split( __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 ) lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Error printing print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
50
1
'''simple docstring''' import os def A__ ( __lowerCAmelCase : str = "input.txt" ): with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as input_file: lowerCamelCase__ = [ [int(__lowerCAmelCase ) for element in line.split(""",""" )] for line in input_file.readlines() ] lowerCamelCase__ = len(__lowerCAmelCase ) lowerCamelCase__ = len(matrix[0] ) lowerCamelCase__ = [[-1 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )] for i in range(__lowerCAmelCase ): lowerCamelCase__ = matrix[i][0] for j in range(1 , __lowerCAmelCase ): for i in range(__lowerCAmelCase ): lowerCamelCase__ = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , __lowerCAmelCase ): lowerCamelCase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowerCamelCase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'{solution() = }')
50
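The solver above is a column-by-column dynamic program: take the cost of moving right, then relax each column twice for downward and upward moves. A small worked instance of the same recurrence:

matrix = [
    [1, 9, 1],
    [1, 1, 1],
    [9, 9, 1],
]
sums = [[row[0], 0, 0] for row in matrix]  # column 0 is just the entry cost
for j in range(1, 3):
    for i in range(3):                     # move right
        sums[i][j] = sums[i][j - 1] + matrix[i][j]
    for i in range(1, 3):                  # relax moving down
        sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
    for i in range(1, -1, -1):             # relax moving up
        sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
print(min(row[-1] for row in sums))  # 3: straight along the middle row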
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
50
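All of the processors tested above share one calling convention, processor(input_ids, scores, cur_len=...), which is what lets FlaxLogitsProcessorList chain them. A minimal composition sketch (batch and vocabulary sizes are illustrative):

import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

batch_size, vocab_size, cur_len = 2, 10, 5
input_ids = jnp.zeros((batch_size, cur_len), dtype=jnp.int32)
scores = jnp.ones((batch_size, vocab_size)) / vocab_size
processor = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(3)]
)
scores = processor(input_ids, scores, cur_len=cur_len)  # applied left to right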
1
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' ,'False' ) ) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue_model_parallelism.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, ] ) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() ,encoding="""utf-8""" ,check=_lowerCAmelCase ,) assert hasattr(self ,"""env""" ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): # configuration for running training on smdistributed Model Parallel lowerCamelCase__ = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase__ = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' ,instance_count=_lowerCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=_lowerCAmelCase ,hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_00, } ,metric_definitions=self.env.metric_definitions ,distribution=_lowerCAmelCase ,py_version="""py36""" ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ): TrainingJobAnalytics(_lowerCAmelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): # create estimator lowerCamelCase__ = self.create_estimator(_lowerCAmelCase ) # run training estimator.fit() # result dataframe lowerCamelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in 
eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,_lowerCAmelCase )
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
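Init files like the one above defer the heavy torch/TF imports until a symbol is first touched. The same effect can be sketched without _LazyModule using a module-level __getattr__ (PEP 562); the package and class names here are hypothetical:

# mypackage/__init__.py -- simplified lazy-import sketch
import importlib

_import_structure = {"modeling": ["MyModel"], "configuration": ["MyConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)  # import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")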
1
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu UpperCamelCase : Tuple = [ 'EAGER', 'AOT_EAGER', 'INDUCTOR', 'NVFUSER', 'AOT_NVFUSER', 'AOT_CUDAGRAPHS', 'OFI', 'FX2TRT', 'ONNXRT', 'IPEX', ] def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : int=None ): lowerCamelCase__ = True while ask_again: lowerCamelCase__ = input(__lowerCAmelCase ) try: if default is not None and len(__lowerCAmelCase ) == 0: return default return convert_value(__lowerCAmelCase ) if convert_value is not None else result except Exception: if error_message is not None: print(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=[] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=0 ): lowerCamelCase__ = BulletMenu(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = menu.run(default_choice=__lowerCAmelCase ) return convert_value(__lowerCAmelCase ) if convert_value is not None else result def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = int(__lowerCAmelCase ) return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] ) def A__ ( __lowerCAmelCase : str ): lowerCamelCase__ = int(__lowerCAmelCase ) return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] ) def A__ ( __lowerCAmelCase : str ): lowerCamelCase__ = int(__lowerCAmelCase ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def A__ ( __lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = int(__lowerCAmelCase ) return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] ) def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = int(__lowerCAmelCase ) return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] ) def A__ ( __lowerCAmelCase : Any ): return {"yes": True, "no": False}[value.lower()] class UpperCamelCase__ (argparse.RawDescriptionHelpFormatter ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = super()._format_usage(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = usage.replace("""<command> [<args>] """ ,"""""" ) return usage
50
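Every converter above follows the same shape: prompt, fall back to a default on empty input, and retry until the converter accepts the answer. A compact stand-alone version of that pattern:

def ask_field(prompt, convert_value=None, default=None, error_message=None):
    while True:
        result = input(prompt)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)

# ask_field("Use CPU only? [yes/NO]: ",
#           convert_value=lambda v: {"yes": True, "no": False}[v.lower()],
#           default=False)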
'''simple docstring''' def A__ ( numa : int , numb : int ): return numa ^ numb < 0 if __name__ == "__main__": import doctest doctest.testmod()
50
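The XOR test above works because two's-complement integers differ in sign exactly when their sign bits differ, and XOR exposes that bit; Python's arbitrary-precision negatives preserve the behaviour. Quick sanity checks:

def different_signs(a: int, b: int) -> bool:
    return (a ^ b) < 0  # sign bit of a ^ b is set iff the signs differ

assert different_signs(3, -7)
assert not different_signs(-3, -7)
assert not different_signs(3, 7)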
1
'''simple docstring''' import numpy as np import qiskit def A__ ( __lowerCAmelCase : int = 8 , __lowerCAmelCase : int | None = None ): lowerCamelCase__ = np.random.default_rng(seed=__lowerCAmelCase ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. lowerCamelCase__ = 6 * key_len # Measurement basis for Alice's qubits. lowerCamelCase__ = rng.integers(2 , size=__lowerCAmelCase ) # The set of states Alice will prepare. lowerCamelCase__ = rng.integers(2 , size=__lowerCAmelCase ) # Measurement basis for Bob's qubits. lowerCamelCase__ = rng.integers(2 , size=__lowerCAmelCase ) # Quantum Circuit to simulate BB84 lowerCamelCase__ = qiskit.QuantumCircuit(__lowerCAmelCase , name="""BB84""" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(__lowerCAmelCase ): if alice_state[index] == 1: bbaa_circ.x(__lowerCAmelCase ) if alice_basis[index] == 1: bbaa_circ.h(__lowerCAmelCase ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(__lowerCAmelCase ): if bob_basis[index] == 1: bbaa_circ.h(__lowerCAmelCase ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. lowerCamelCase__ = qiskit.Aer.get_backend("""aer_simulator""" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. lowerCamelCase__ = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1 , seed_simulator=__lowerCAmelCase ) # Returns the result of measurement. lowerCamelCase__ = job.result().get_counts(__lowerCAmelCase ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. lowerCamelCase__ = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. lowerCamelCase__ = gen_key[:key_len] if len(__lowerCAmelCase ) >= key_len else gen_key.ljust(__lowerCAmelCase , """0""" ) return key if __name__ == "__main__": print(F'The generated key is : {bbaa(8, seed=0)}') from doctest import testmod testmod()
50
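The key-sifting step at the end of the simulation above, which keeps only positions where Alice's and Bob's random basis choices agree, isolated as a small demo (measurement results are faked with random bits):

import numpy as np

rng = np.random.default_rng(seed=0)
n = 12
alice_basis = rng.integers(2, size=n)
bob_basis = rng.integers(2, size=n)
measured_bits = rng.integers(2, size=n)  # stand-in for the circuit's measurement
key = "".join(
    str(bit)
    for a, b, bit in zip(alice_basis, bob_basis, measured_bits)
    if a == b  # keep only matching-basis positions
)
print(key)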
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
50
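Because every row of the grid is sorted in decreasing order, one binary search per row finds where the negatives begin, giving O(m log n) overall. A self-contained version of that search, checked on the small fixture grid:

def first_negative_index(row):
    lo, hi = 0, len(row)  # row is sorted in decreasing order
    while lo < hi:
        mid = (lo + hi) // 2
        if row[mid] < 0:
            hi = mid
        else:
            lo = mid + 1
    return lo

grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(sum(len(row) - first_negative_index(row) for row in grid))  # 8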
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase : int = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
50
1
'''simple docstring''' UpperCamelCase : Optional[int] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} UpperCamelCase : Tuple = ['a', 'b', 'c', 'd', 'e'] def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ): lowerCamelCase__ = start # add current to visited visited.append(__lowerCAmelCase ) lowerCamelCase__ = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: lowerCamelCase__ = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # if all neighbors visited add current to sort sort.append(__lowerCAmelCase ) # if all vertices haven't been visited select a new one to visit if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): for vertice in vertices: if vertice not in visited: lowerCamelCase__ = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # return sort return sort if __name__ == "__main__": UpperCamelCase : int = topological_sort('a', [], []) print(sort)
50
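The sort above appends a vertex only after all of its out-neighbours, so the returned list is the reverse of a topological order for this edge direction. A self-contained restatement with the graph passed explicitly:

def topo_dfs(graph, start, visited, order):
    visited.append(start)
    for neighbour in graph[start]:
        if neighbour not in visited:
            topo_dfs(graph, neighbour, visited, order)
    order.append(start)  # post-order: after all out-neighbours
    return order

graph = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
order = topo_dfs(graph, "a", [], [])
print(order)        # ['c', 'd', 'e', 'b', 'a']
print(order[::-1])  # reversed: a valid topological order with 'a' first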
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Any = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys UpperCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring''' from PIL import Image def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ): def brightness(__lowerCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
50
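Since 128 + level + (c - 128) reduces to c + level, results can leave the 0..255 range for large levels; a defensive variant that clamps explicitly rather than relying on Pillow's lookup-table handling:

from PIL import Image

def change_brightness_clamped(img: Image.Image, level: float) -> Image.Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: max(0, min(255, int(c + level))))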
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ): lowerCamelCase__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) sd_pipe.set_scheduler("""sample_euler""" ) lowerCamelCase__ = """A painting of a squirrel eating a burger""" lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sd_pipe([prompt] ,generator=_lowerCAmelCase ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" ) lowerCamelCase__ = output.images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCamelCase__ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): lowerCamelCase__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) sd_pipe.set_scheduler("""sample_euler""" ) lowerCamelCase__ = """A painting of a squirrel eating a burger""" lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sd_pipe([prompt] ,generator=_lowerCAmelCase ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" ) lowerCamelCase__ = output.images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCamelCase__ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def UpperCamelCase_ ( self ): lowerCamelCase__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) lowerCamelCase__ = """A painting of a squirrel eating a burger""" lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sd_pipe( [prompt] ,generator=_lowerCAmelCase ,guidance_scale=7.5 ,num_inference_steps=15 ,output_type="""np""" ,use_karras_sigmas=_lowerCAmelCase ,) lowerCamelCase__ = output.images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCamelCase__ = np.array( [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
50
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
50
1
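# --- illustrative aside (not part of the row above) ---
# The row-level binary search above exploits that each row is sorted in
# decreasing order, so the negatives form a suffix. A standalone, hand-checkable
# restatement of the same routine with readable names:
def _first_negative_index(array: list) -> int:
    left, right = 0, len(array) - 1
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:  # i.e. while left <= right
        mid = (left + right) // 2
        if array[mid] < 0 and array[mid - 1] >= 0:
            return mid  # first negative found
        if array[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(array)  # no negatives at all

assert _first_negative_index([4, 3, 2, -1]) == 3
assert _first_negative_index([1, 0, -1, -2]) == 2
assert _first_negative_index([-1, -2]) == 0
assert _first_negative_index([3, 2, 1]) == 3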
'''simple docstring''' import json import sys def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , encoding="""utf-8""" ) as f: lowerCamelCase__ = json.load(__lowerCAmelCase ) lowerCamelCase__ = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """] for benchmark_name in sorted(__lowerCAmelCase ): lowerCamelCase__ = results[benchmark_name] lowerCamelCase__ = benchmark_name.split("""/""" )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) lowerCamelCase__ = """| metric |""" lowerCamelCase__ = """|--------|""" lowerCamelCase__ = """| new / old (diff) |""" for metric_name in sorted(__lowerCAmelCase ): lowerCamelCase__ = benchmark_res[metric_name] lowerCamelCase__ = metric_vals["""new"""] lowerCamelCase__ = metric_vals.get("""old""" , __lowerCAmelCase ) lowerCamelCase__ = metric_vals.get("""diff""" , __lowerCAmelCase ) lowerCamelCase__ = F''' {new_val:f}''' if isinstance(__lowerCAmelCase , (int, float) ) else """None""" if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(__lowerCAmelCase , (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(__lowerCAmelCase , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("""</details>""" ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.writelines("""\n""".join(__lowerCAmelCase ) ) if __name__ == "__main__": UpperCamelCase : Dict = sys.argv[1] UpperCamelCase : Any = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
50
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
50
1
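# --- illustrative aside (not part of the row above) ---
# The release helper above derives its bump targets via packaging.version. A
# small sketch of the three rules it applies; the version strings are
# hypothetical, only the public packaging API is used.
from packaging.version import parse

v = parse("4.28.0.dev0")
assert v.is_devrelease and v.base_version == "4.28.0"

release = v.base_version                      # pre-release from a dev branch: drop .dev0 -> "4.28.0"
patch = f"{v.major}.{v.minor}.{v.micro + 1}"  # patch release: bump micro -> "4.28.1"
next_dev = f"{v.major}.{v.minor + 1}.0.dev0"  # post-release: open next minor dev cycle -> "4.29.0.dev0"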
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=16 ,_lowerCAmelCase=[1, 2, 1] ,_lowerCAmelCase=[2, 2, 4] ,_lowerCAmelCase=2 ,_lowerCAmelCase=2.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=10 ,_lowerCAmelCase=8 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = depths lowerCamelCase__ = num_heads lowerCamelCase__ = window_size lowerCamelCase__ = mlp_ratio lowerCamelCase__ = qkv_bias lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = hidden_act lowerCamelCase__ = use_absolute_embeddings lowerCamelCase__ = patch_norm lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = initializer_range lowerCamelCase__ = is_training lowerCamelCase__ = scope lowerCamelCase__ = use_labels lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = encoder_stride def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = SwinvaModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) lowerCamelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase__ = 
int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = SwinvaForMaskedImageModeling(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = SwinvaForMaskedImageModeling(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.type_sequence_label_size lowerCamelCase__ = SwinvaForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _UpperCamelCase = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = SwinvaModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,embed_dim=37 ) def UpperCamelCase_ ( self ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,nn.Linear ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ 
= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = True for model_class in self.all_model_classes: lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = True lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.attentions lowerCamelCase__ = len(self.model_tester.depths ) self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase__ = True lowerCamelCase__ = config.window_size**2 lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) lowerCamelCase__ = len(_lowerCAmelCase ) # Check attention is always last and order is fine lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) if hasattr(self.model_tester ,"""num_hidden_states_types""" ): lowerCamelCase__ = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCamelCase__ = 2 self.assertEqual(out_len + added_hidden_states ,len(_lowerCAmelCase ) ) lowerCamelCase__ = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.hidden_states lowerCamelCase__ = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) # Swinv2 has a different seq_length lowerCamelCase__ = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowerCamelCase__ = outputs.reshaped_hidden_states self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = reshaped_hidden_states[0].shape lowerCamelCase__ = ( reshaped_hidden_states[0].view(_lowerCAmelCase ,_lowerCAmelCase ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( 
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = 3 lowerCamelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase__ = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,(padded_height, padded_width) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = SwinvaModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = _config_zero_init(_lowerCAmelCase ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(config=_lowerCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( _lowerCAmelCase ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
50
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
1
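# --- illustrative aside (not part of the row above) ---
# The token_type_ids built above follow the standard BERT segment layout:
# zeros over [CLS] A [SEP], ones over B [SEP]. The token ids below are made up.
cls, sep = [101], [102]
token_ids_a, token_ids_b = [7, 8, 9], [4, 5]

single = len(cls + token_ids_a + sep) * [0]
pair = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

assert single == [0, 0, 0, 0, 0]
assert pair == [0, 0, 0, 0, 0, 1, 1, 1]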
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : List[Any] = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save positions of Chinese subwords that start with ##, meaning they are part of a whole word. 
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
50
1
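# --- illustrative aside (not part of the row above) ---
# The ## marking above is clearest on a concrete case: if LTP segments "身高"
# as one word while BERT emits it as two single-character tokens, every
# character after the first gets a ## prefix so whole-word masking can target
# the full word. Hand-running the greedy longest-match loop on a toy input:
bert_tokens = ["我", "身", "高"]
chinese_word_set = {"身高"}
# add_sub_symbol(bert_tokens, chinese_word_set) then yields:
expected = ["我", "身", "##高"]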
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : List[str] = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'marian' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self ,_lowerCAmelCase=5_81_01 ,_lowerCAmelCase=None ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=12 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=16 ,_lowerCAmelCase=12 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=16 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=5_81_00 ,_lowerCAmelCase=False ,_lowerCAmelCase=5_81_00 ,_lowerCAmelCase=0 ,_lowerCAmelCase=0 ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = decoder_vocab_size or vocab_size lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = d_model lowerCamelCase__ = encoder_ffn_dim lowerCamelCase__ = encoder_layers lowerCamelCase__ = encoder_attention_heads lowerCamelCase__ = decoder_ffn_dim lowerCamelCase__ = decoder_layers lowerCamelCase__ = decoder_attention_heads lowerCamelCase__ = dropout lowerCamelCase__ = attention_dropout lowerCamelCase__ = activation_dropout lowerCamelCase__ = activation_function lowerCamelCase__ = init_std lowerCamelCase__ = encoder_layerdrop lowerCamelCase__ = decoder_layerdrop lowerCamelCase__ = use_cache lowerCamelCase__ = encoder_layers lowerCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase__ = share_encoder_decoder_embeddings super().__init__( pad_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,is_encoder_decoder=_lowerCAmelCase ,decoder_start_token_id=_lowerCAmelCase ,forced_eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase ,) class UpperCamelCase__ (a ): '''simple docstring''' @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def UpperCamelCase_ ( self ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowerCamelCase__ = {0: """batch"""} lowerCamelCase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """decoder_sequence"""} lowerCamelCase__ = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCamelCase__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ = self.num_layers for i in range(_lowerCAmelCase ): lowerCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} lowerCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} else: lowerCamelCase__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def UpperCamelCase_ ( self ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ = super().outputs else: lowerCamelCase__ = super(_lowerCAmelCase ,self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ = self.num_layers for i in range(_lowerCAmelCase ): lowerCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} lowerCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = self._generate_dummy_inputs_for_encoder_and_decoder( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # Generate decoder inputs lowerCamelCase__ = seq_length if not self.use_past else 1 lowerCamelCase__ = self._generate_dummy_inputs_for_encoder_and_decoder( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ = dict(**_lowerCAmelCase ,**_lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape lowerCamelCase__ = common_inputs["""decoder_input_ids"""].shape[1] lowerCamelCase__ , lowerCamelCase__ = self.num_attention_heads lowerCamelCase__ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ = decoder_seq_length + 3 lowerCamelCase__ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase )] ,dim=1 ) lowerCamelCase__ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ = self.num_layers lowerCamelCase__ = min(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = max(_lowerCAmelCase ,_lowerCAmelCase ) - min_num_layers lowerCamelCase__ = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(_lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), ) ) # TODO: test this. 
lowerCamelCase__ = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(_lowerCAmelCase ,_lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) ) return common_inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = self._generate_dummy_inputs_for_encoder_and_decoder( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ = self.num_layers lowerCamelCase__ , lowerCamelCase__ = self.num_attention_heads lowerCamelCase__ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ = common_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [common_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase ) ] return common_inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCamelCase__ = compute_effective_axis_dimension( _lowerCAmelCase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ = tokenizer.num_special_tokens_to_add(_lowerCAmelCase ) lowerCamelCase__ = compute_effective_axis_dimension( _lowerCAmelCase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ = dict(tokenizer(_lowerCAmelCase ,return_tensors=_lowerCAmelCase ) ) return common_inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) else: lowerCamelCase__ = self._generate_dummy_inputs_for_causal_lm( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) return common_inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ = super()._flatten_past_key_values_(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) else: lowerCamelCase__ = super(_lowerCAmelCase ,self )._flatten_past_key_values_( _lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) @property def UpperCamelCase_ ( self ): return 1E-4
50
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" in checkpoint_url or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" 
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
50
1
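# --- illustrative aside (not part of the row above) ---
# The least obvious rename in the DPT conversion above is the fusion-stage
# reversal: checkpoint blocks refinenet4..1 map to HF fusion_stage.layers.0..3,
# hence the abs(layer_idx - 4) remapping. A one-line check of that trick:
for layer_idx, hf_idx in [(4, 0), (3, 1), (2, 2), (1, 3)]:
    assert abs(layer_idx - 4) == hf_idx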
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets UpperCamelCase : Optional[int] = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' UpperCamelCase : int = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' UpperCamelCase : Tuple = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ (datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ), """references""": datasets.Value("""string""" ), } ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = 0.0 for i, j in zip(_lowerCAmelCase ,_lowerCAmelCase ): n_correct += 1.0 if math_equivalence.is_equiv(_lowerCAmelCase ,_lowerCAmelCase ) else 0.0 lowerCamelCase__ = n_correct / len(_lowerCAmelCase ) return { "accuracy": accuracy, }
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
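# --- Hedged sketch of the lazy-import pattern behind _LazyModule ---
# Not the transformers implementation: a minimal module type whose attributes
# resolve to submodule imports on first access, so importing the package does
# not pull in torch/tokenizers until a symbol is actually used.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # reverse map: exported attribute -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value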
50
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = LEDConfig _UpperCamelCase = {} _UpperCamelCase = 'gelu' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=99 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=20 ,_lowerCAmelCase=2 ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,_lowerCAmelCase=4 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = eos_token_id lowerCamelCase__ = pad_token_id lowerCamelCase__ = bos_token_id lowerCamelCase__ = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowerCamelCase__ = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowerCamelCase__ = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) lowerCamelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) lowerCamelCase__ = tf.concat([input_ids, eos_tensor] ,axis=1 ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCamelCase__ = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,attention_window=self.attention_window ,**self.config_updates ,) lowerCamelCase__ = prepare_led_inputs_dict(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = tf.concat( [tf.zeros_like(_lowerCAmelCase )[:, :-1], tf.ones_like(_lowerCAmelCase )[:, -1:]] ,axis=-1 ,) lowerCamelCase__ = global_attention_mask return config, inputs_dict def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFLEDModel(config=_lowerCAmelCase ).get_decoder() lowerCamelCase__ = inputs_dict["""input_ids"""] lowerCamelCase__ = input_ids[:1, :] lowerCamelCase__ = inputs_dict["""attention_mask"""][:1, :] lowerCamelCase__ = 1 # first forward pass lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,use_cache=_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowerCamelCase__ = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and lowerCamelCase__ = tf.concat([input_ids, next_tokens] ,axis=-1 ) lowerCamelCase__ = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase )[0] lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,past_key_values=_lowerCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice lowerCamelCase__ = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-3 ) def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , ): if attention_mask is None: lowerCamelCase__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , 
config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _UpperCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () _UpperCamelCase = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _UpperCamelCase = True _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFLEDModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = tf.zeros_like(inputs_dict["""attention_mask"""] ) lowerCamelCase__ = 2 lowerCamelCase__ = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,1 ,inputs_dict["""global_attention_mask"""] ,) lowerCamelCase__ = True lowerCamelCase__ = self.model_tester.seq_length lowerCamelCase__ = self.model_tester.encoder_seq_length def check_decoder_attentions_output(_lowerCAmelCase ): lowerCamelCase__ = outputs.decoder_attentions self.assertEqual(len(_lowerCAmelCase ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,) def check_encoder_attentions_output(_lowerCAmelCase ): lowerCamelCase__ = [t.numpy() for t in outputs.encoder_attentions] lowerCamelCase__ = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(_lowerCAmelCase ) ,self.model_tester.num_hidden_layers ) self.assertEqual(len(_lowerCAmelCase ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,) self.assertListEqual( list(global_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,) for model_class in self.all_model_classes: lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = model(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = len(_lowerCAmelCase ) self.assertEqual(config.output_hidden_states ,_lowerCAmelCase ) check_encoder_attentions_output(_lowerCAmelCase ) if self.is_encoder_decoder: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = model(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) 
self.assertEqual(config.output_hidden_states ,_lowerCAmelCase ) check_decoder_attentions_output(_lowerCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase__ = True lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = model(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) self.assertEqual(config.output_hidden_states ,_lowerCAmelCase ) check_encoder_attentions_output(_lowerCAmelCase ) # Check attention is always last and order is fine lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = model(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) ,len(_lowerCAmelCase ) ) self.assertEqual(model.config.output_hidden_states ,_lowerCAmelCase ) check_encoder_attentions_output(_lowerCAmelCase ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # TODO: Head-masking not yet implement pass def A__ ( __lowerCAmelCase : Optional[int] ): return tf.constant(__lowerCAmelCase , dtype=tf.intaa ) UpperCamelCase : List[Any] = 1E-4 @slow @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here lowerCamelCase__ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCamelCase__ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCamelCase__ = prepare_led_inputs_dict(model.config ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase )[0] lowerCamelCase__ = (1, 10_24, 7_68) self.assertEqual(output.shape ,_lowerCAmelCase ) # change to expected output here lowerCamelCase__ = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] ,) tf.debugging.assert_near(output[:, :3, :3] ,_lowerCAmelCase ,atol=1E-3 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here lowerCamelCase__ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCamelCase__ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCamelCase__ = prepare_led_inputs_dict(model.config ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase )[0] lowerCamelCase__ = (1, 10_24, model.config.vocab_size) self.assertEqual(output.shape ,_lowerCAmelCase ) # change to expected output here lowerCamelCase__ = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] ,) tf.debugging.assert_near(output[:, :3, :3] ,_lowerCAmelCase ,atol=1E-3 ,rtol=1E-3 )
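# --- Hedged sketch of check_decoder_model_past_large_inputs above ---
# The test's invariant: decoding with past_key_values must reproduce the
# no-cache logits for the appended tokens, compared on a random vocab slice
# with a loose tolerance. Synthetic arrays stand in for real model outputs.
import numpy as np

rng = np.random.default_rng(0)
output_from_no_past = rng.normal(size=(1, 8, 16))  # logits for the full sequence
output_from_past = output_from_no_past[:, -3:, :]  # a correct cache yields the last 3 steps

random_slice_idx = int(rng.integers(0, output_from_no_past.shape[-1]))
np.testing.assert_allclose(
    output_from_no_past[:, -3:, random_slice_idx],
    output_from_past[:, :, random_slice_idx],
    rtol=1e-3,
)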
50
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
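# --- Hedged sketch of the dummy past_key_values built above ---
# Each layer contributes a (key, value) pair shaped
# (batch, num_heads, past_length, head_dim), and the attention mask is
# widened to cover past + current tokens. All sizes here are illustrative.
import torch

batch, num_heads, head_dim, num_layers = 2, 16, 64, 2
seq_len = 3
past_length = seq_len + 2  # mirrors the "seqlen + 2" choice above

past_shape = (batch, num_heads, past_length, head_dim)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]

attention_mask = torch.ones(batch, seq_len, dtype=torch.int64)
attention_mask = torch.cat(
    [attention_mask, torch.ones(batch, past_length, dtype=attention_mask.dtype)], dim=1
)
print(attention_mask.shape)  # torch.Size([2, 8])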
50
1
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ProphetNetTokenizer _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() lowerCamelCase__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """UNwant\u00E9d,running""" lowerCamelCase__ = """unwanted, running""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class(self.vocab_file ) lowerCamelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(_lowerCAmelCase ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[9, 6, 7, 12, 10, 11] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowerCamelCase__ = {} for i, token in enumerate(_lowerCAmelCase ): lowerCamelCase__ = i lowerCamelCase__ = WordpieceTokenizer(vocab=_lowerCAmelCase ,unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) ,[] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] ) @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) lowerCamelCase__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCamelCase__ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02] lowerCamelCase__ = tokenizer(_lowerCAmelCase ,padding=_lowerCAmelCase ,return_tensors="""pt""" ) self.assertIsInstance(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) def UpperCamelCase_ ( self ): self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def UpperCamelCase_ ( self ): self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCamelCase_ ( self ): self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase ) assert encoded_sentence == text + [1_02] assert encoded_pair == text + [1_02] + text_a + [1_02]
50
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : int = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCamelCase : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCamelCase : List[str] = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCamelCase : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCamelCase : Optional[Any] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 
'FlaxViTForImageClassification'), ] ) UpperCamelCase : Optional[int] = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) UpperCamelCase : Dict = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCamelCase : List[Any] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCamelCase : List[str] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCamelCase : int = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCamelCase : Tuple = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCamelCase : Tuple = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCamelCase : str = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCamelCase : Optional[int] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCamelCase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_MAPPING_NAMES) UpperCamelCase : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCamelCase : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCamelCase : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCamelCase : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCamelCase : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCamelCase : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_MAPPING UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModel) class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCamelCase : Dict = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCamelCase : Tuple = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCamelCase : Any = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCamelCase : Tuple = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCamelCase : int = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = 
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCamelCase : int = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCamelCase : int = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCamelCase : Union[str, Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class UpperCamelCase__ (_BaseAutoModelClass ): '''simple docstring''' _UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCamelCase : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
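# --- Hedged sketch of what the _LazyAutoMapping pairs above encode ---
# A model-type key joins a config class name to an architecture name, so a
# config -> model lookup is two dictionary hops. Names here are illustrative
# stand-ins for the real CONFIG_MAPPING_NAMES / FLAX_MODEL_MAPPING_NAMES.
from collections import OrderedDict

config_names = OrderedDict([("bert", "BertConfig"), ("gpt2", "GPT2Config")])
model_names = OrderedDict([("bert", "FlaxBertModel"), ("gpt2", "FlaxGPT2Model")])

def model_class_name_for(config_class_name: str) -> str:
    model_type = next(k for k, v in config_names.items() if v == config_class_name)
    return model_names[model_type]

print(model_class_name_for("GPT2Config"))  # FlaxGPT2Model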
50
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(__lowerCAmelCase , torch.Tensor ): raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" ) lowerCamelCase__ = v.half() if save_path is None: # overwrite src_path lowerCamelCase__ = src_path torch.save(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": fire.Fire(convert)
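# --- Hedged toy run of the converter above ---
# Every tensor in a saved state dict is cast to float16, roughly halving
# on-disk size for fp32 checkpoints; paths and tensors here are throwaway.
# (Note: as obfuscated, fire.Fire(convert) still references the
# pre-obfuscation function name; the renamed def is the A__ above.)
import torch

state_dict = {"weight": torch.randn(4, 4), "bias": torch.randn(4)}
torch.save(state_dict, "toy.bin")

loaded = torch.load("toy.bin", map_location="cpu")
halved = {k: v.half() for k, v in loaded.items()}
torch.save(halved, "toy.fp16.bin")
print({k: v.dtype for k, v in halved.items()})  # both torch.float16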
50
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Any = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) UpperCamelCase : Tuple = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', 
F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'), ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowerCamelCase__ = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) lowerCamelCase__ = value else: lowerCamelCase__ = value return new_state_dict def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = """""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowerCamelCase__ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[:256, :] lowerCamelCase__ = in_proj_bias[:256] lowerCamelCase__ = in_proj_weight[256:512, :] lowerCamelCase__ = in_proj_bias[256:512] lowerCamelCase__ = in_proj_weight[-256:, :] lowerCamelCase__ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention lowerCamelCase__ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) lowerCamelCase__ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[:256, :] lowerCamelCase__ = in_proj_bias[:256] lowerCamelCase__ = in_proj_weight[256:512, :] lowerCamelCase__ = in_proj_bias[256:512] lowerCamelCase__ = in_proj_weight[-256:, :] lowerCamelCase__ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention lowerCamelCase__ = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) lowerCamelCase__ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict lowerCamelCase__ = in_proj_weight_cross_attn[:256, :] lowerCamelCase__ = in_proj_bias_cross_attn[:256] 
lowerCamelCase__ = in_proj_weight_cross_attn[256:512, :] lowerCamelCase__ = in_proj_bias_cross_attn[256:512] lowerCamelCase__ = in_proj_weight_cross_attn[-256:, :] lowerCamelCase__ = in_proj_bias_cross_attn[-256:] def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ): lowerCamelCase__ , lowerCamelCase__ = image.size lowerCamelCase__ = max(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = 800 if """detection""" in checkpoint_url else 1000 lowerCamelCase__ = target_max_size / current_max_size lowerCamelCase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = F.to_tensor(__lowerCAmelCase ) lowerCamelCase__ = F.normalize(__lowerCAmelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ): logger.info("""Converting model...""" ) # load original state dict lowerCamelCase__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" ) # rename keys for src, dest in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = rename_backbone_keys(__lowerCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(__lowerCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowerCamelCase__ = """model.""" for key in state_dict.copy().keys(): if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # create HuggingFace model and load state dict lowerCamelCase__ = TableTransformerConfig( backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = {0: """table""", 1: """table rotated"""} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} else: lowerCamelCase__ = 125 lowerCamelCase__ = 6 lowerCamelCase__ = { 0: """table""", 1: """table column""", 2: """table row""", 3: """table column header""", 4: """table projected row header""", 5: """table spanning cell""", } lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = DetrImageProcessor( format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 ) lowerCamelCase__ = TableTransformerForObjectDetection(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # verify our conversion lowerCamelCase__ = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png""" lowerCamelCase__ = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=__lowerCAmelCase ) lowerCamelCase__ = Image.open(__lowerCAmelCase ).convert("""RGB""" ) lowerCamelCase__ = normalize(resize(__lowerCAmelCase , __lowerCAmelCase ) ).unsqueeze(0 ) lowerCamelCase__ = model(__lowerCAmelCase ) if "detection" in checkpoint_url: lowerCamelCase__ = (1, 15, 3) lowerCamelCase__ = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) lowerCamelCase__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 
0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: lowerCamelCase__ = (1, 125, 7) lowerCamelCase__ = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) lowerCamelCase__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) model.save_pretrained(__lowerCAmelCase ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: # Push model to HF hub logger.info("""Pushing model to the hub...""" ) lowerCamelCase__ = ( """microsoft/table-transformer-detection""" if """detection""" in checkpoint_url else """microsoft/table-transformer-structure-recognition""" ) model.push_to_hub(__lowerCAmelCase ) image_processor.push_to_hub(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', type=str, choices=[ 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth', ], help='URL of the Table Transformer checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCamelCase : str = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
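# --- Hedged sketch of the resize rule in the conversion above ---
# The longer image side is scaled to the target (800 for the detection
# checkpoint, 1000 for structure recognition) and the aspect ratio is kept.
from PIL import Image

def resize_longest_side(image: Image.Image, target_max_size: int) -> Image.Image:
    width, height = image.size
    scale = target_max_size / max(width, height)
    return image.resize((int(round(scale * width)), int(round(scale * height))))

img = Image.new("RGB", (1200, 800))
print(resize_longest_side(img, 800).size)  # (800, 533)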
50
'''simple docstring''' import os from pathlib import Path def A__ ( ): from torch.utils.cpp_extension import load lowerCamelCase__ = Path(__lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" lowerCamelCase__ = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , __lowerCAmelCase , with_cuda=__lowerCAmelCase , extra_include_paths=[str(__lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
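# --- Hedged note on the kernel loader above ---
# As obfuscated, Path(__lowerCAmelCase) appears inside a zero-argument
# function; upstream this is presumably Path(__file__). The path arithmetic,
# with an illustrative directory layout, looks like:
from pathlib import Path

here = Path(__file__).resolve()
kernel_root = here.parent.parent.parent / "kernels" / "deformable_detr"
sources = [
    kernel_root / name
    for name in ("vision.cpp", "cpu/ms_deform_attn_cpu.cpp", "cuda/ms_deform_attn_cuda.cu")
]
print([str(p) for p in sources])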
50
1
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=0 ): lowerCamelCase__ = [] for old_item in old_list: lowerCamelCase__ = old_item.replace("""in_layers.0""" , """norm1""" ) lowerCamelCase__ = new_item.replace("""in_layers.2""" , """conv1""" ) lowerCamelCase__ = new_item.replace("""out_layers.0""" , """norm2""" ) lowerCamelCase__ = new_item.replace("""out_layers.3""" , """conv2""" ) lowerCamelCase__ = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) lowerCamelCase__ = new_item.replace("""skip_connection""" , """conv_shortcut""" ) lowerCamelCase__ = shave_segments(__lowerCAmelCase , n_shave_prefix_segments=__lowerCAmelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=0 ): lowerCamelCase__ = [] for old_item in old_list: lowerCamelCase__ = old_item lowerCamelCase__ = new_item.replace("""norm.weight""" , """group_norm.weight""" ) lowerCamelCase__ = new_item.replace("""norm.bias""" , """group_norm.bias""" ) lowerCamelCase__ = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) lowerCamelCase__ = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) lowerCamelCase__ = shave_segments(__lowerCAmelCase , n_shave_prefix_segments=__lowerCAmelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : Tuple=None ): assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): lowerCamelCase__ = old_checkpoint[path] lowerCamelCase__ = old_tensor.shape[0] // 3 lowerCamelCase__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) lowerCamelCase__ = old_tensor.shape[0] // config["""num_head_channels"""] // 3 lowerCamelCase__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = old_tensor.split(channels // num_heads , dim=1 ) lowerCamelCase__ = query.reshape(__lowerCAmelCase ) lowerCamelCase__ = key.reshape(__lowerCAmelCase ) lowerCamelCase__ = value.reshape(__lowerCAmelCase ) for path in paths: lowerCamelCase__ = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here lowerCamelCase__ = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) lowerCamelCase__ = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) lowerCamelCase__ = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: lowerCamelCase__ = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: lowerCamelCase__ = old_checkpoint[path["""old"""]][:, :, 0] else: lowerCamelCase__ = old_checkpoint[path["""old"""]] def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ): lowerCamelCase__ = {} lowerCamelCase__ = checkpoint["""time_embed.0.weight"""] lowerCamelCase__ = checkpoint["""time_embed.0.bias"""] lowerCamelCase__ = checkpoint["""time_embed.2.weight"""] lowerCamelCase__ = checkpoint["""time_embed.2.bias"""] lowerCamelCase__ = checkpoint["""input_blocks.0.0.weight"""] lowerCamelCase__ = checkpoint["""input_blocks.0.0.bias"""] lowerCamelCase__ = checkpoint["""out.0.weight"""] lowerCamelCase__ = checkpoint["""out.0.bias"""] lowerCamelCase__ = checkpoint["""out.2.weight"""] lowerCamelCase__ = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only lowerCamelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) lowerCamelCase__ = { layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__lowerCAmelCase ) } # Retrieves the keys for the middle blocks only lowerCamelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) lowerCamelCase__ = { layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__lowerCAmelCase ) } # Retrieves the keys for the output blocks only lowerCamelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) lowerCamelCase__ = { layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__lowerCAmelCase ) } for i in range(1 , __lowerCAmelCase ): lowerCamelCase__ = (i - 1) // (config["""num_res_blocks"""] + 1) lowerCamelCase__ = (i - 1) % (config["""num_res_blocks"""] + 1) lowerCamelCase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] lowerCamelCase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: lowerCamelCase__ = 
checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] lowerCamelCase__ = checkpoint[ F'''input_blocks.{i}.0.op.bias''' ] continue lowerCamelCase__ = renew_resnet_paths(__lowerCAmelCase ) lowerCamelCase__ = {"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} lowerCamelCase__ = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path, resnet_op] , config=__lowerCAmelCase ) if len(__lowerCAmelCase ): lowerCamelCase__ = renew_attention_paths(__lowerCAmelCase ) lowerCamelCase__ = { """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } lowerCamelCase__ = { F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=__lowerCAmelCase , config=__lowerCAmelCase , ) lowerCamelCase__ = middle_blocks[0] lowerCamelCase__ = middle_blocks[1] lowerCamelCase__ = middle_blocks[2] lowerCamelCase__ = renew_resnet_paths(__lowerCAmelCase ) assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , config=__lowerCAmelCase ) lowerCamelCase__ = renew_resnet_paths(__lowerCAmelCase ) assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , config=__lowerCAmelCase ) lowerCamelCase__ = renew_attention_paths(__lowerCAmelCase ) lowerCamelCase__ = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , attention_paths_to_split=__lowerCAmelCase , config=__lowerCAmelCase ) for i in range(__lowerCAmelCase ): lowerCamelCase__ = i // (config["""num_res_blocks"""] + 1) lowerCamelCase__ = i % (config["""num_res_blocks"""] + 1) lowerCamelCase__ = [shave_segments(__lowerCAmelCase , 2 ) for name in output_blocks[i]] lowerCamelCase__ = {} for layer in output_block_layers: lowerCamelCase__ , lowerCamelCase__ = layer.split(""".""" )[0], shave_segments(__lowerCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__lowerCAmelCase ) else: lowerCamelCase__ = [layer_name] if len(__lowerCAmelCase ) > 1: lowerCamelCase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] lowerCamelCase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] lowerCamelCase__ = renew_resnet_paths(__lowerCAmelCase ) lowerCamelCase__ = renew_resnet_paths(__lowerCAmelCase ) lowerCamelCase__ = {"""old""": F'''output_blocks.{i}.0''', 
"""new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): lowerCamelCase__ = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) lowerCamelCase__ = checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] lowerCamelCase__ = checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__lowerCAmelCase ) == 2: lowerCamelCase__ = [] if len(__lowerCAmelCase ): lowerCamelCase__ = renew_attention_paths(__lowerCAmelCase ) lowerCamelCase__ = { """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } lowerCamelCase__ = { F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__lowerCAmelCase , ) else: lowerCamelCase__ = renew_resnet_paths(__lowerCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: lowerCamelCase__ = """.""".join(["""output_blocks""", str(__lowerCAmelCase ), path["""old"""]] ) lowerCamelCase__ = """.""".join(["""up_blocks""", str(__lowerCAmelCase ), """resnets""", str(__lowerCAmelCase ), path["""new"""]] ) lowerCamelCase__ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') UpperCamelCase : List[str] = parser.parse_args() UpperCamelCase : List[str] = torch.load(args.checkpoint_path) with open(args.config_file) as f: UpperCamelCase : int = json.loads(f.read()) UpperCamelCase : Any = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] UpperCamelCase : Tuple = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: UpperCamelCase : Dict = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) UpperCamelCase : Dict = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) UpperCamelCase : str = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
50
'''simple docstring'''
def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ):
    lowerCamelCase__ = len(__lowerCAmelCase )
    print("""The following activities are selected:""" )
    # The first activity is always selected
    lowerCamelCase__ = 0
    print(__lowerCAmelCase , end=""",""" )
    # Consider rest of the activities
    for j in range(__lowerCAmelCase ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(__lowerCAmelCase , end=""",""" )
            lowerCamelCase__ = j

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
    UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
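# A de-obfuscated reference sketch of the greedy activity-selection step above,
# with readable local names so the recurrence is visible; the sample
# start/finish arrays mirror the demo values in this file, and activities are
# assumed to be pre-sorted by finish time.
def select_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]
    last = 0
    for j in range(1, len(finish)):
        # greedily take any activity starting after the last selected one finishes
        if start[j] >= finish[last]:
            selected.append(j)
            last = j
    return selected

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]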
50
1
'''simple docstring''' from __future__ import annotations UpperCamelCase : Dict = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def A__ ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : list[list[int]] , ): lowerCamelCase__ = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCAmelCase ) ) ] # the reference grid lowerCamelCase__ = 1 lowerCamelCase__ = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCAmelCase ) ) ] # the action grid lowerCamelCase__ = init[0] lowerCamelCase__ = init[1] lowerCamelCase__ = 0 lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell lowerCamelCase__ = [[f, g, x, y]] lowerCamelCase__ = False # flag that is set when search is complete lowerCamelCase__ = False # flag set if we can't find expand while not found and not resign: if len(__lowerCAmelCase ) == 0: raise ValueError("""Algorithm is unable to find solution""" ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() lowerCamelCase__ = cell.pop() lowerCamelCase__ = next_cell[2] lowerCamelCase__ = next_cell[3] lowerCamelCase__ = next_cell[1] if x == goal[0] and y == goal[1]: lowerCamelCase__ = True else: for i in range(len(__lowerCAmelCase ) ): # to try out different valid actions lowerCamelCase__ = x + DIRECTIONS[i][0] lowerCamelCase__ = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__lowerCAmelCase ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: lowerCamelCase__ = g + cost lowerCamelCase__ = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) lowerCamelCase__ = 1 lowerCamelCase__ = i lowerCamelCase__ = [] lowerCamelCase__ = goal[0] lowerCamelCase__ = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0] lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1] lowerCamelCase__ = xa lowerCamelCase__ = ya invpath.append([x, y] ) lowerCamelCase__ = [] for i in range(len(__lowerCAmelCase ) ): path.append(invpath[len(__lowerCAmelCase ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCamelCase : List[Any] = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCamelCase : List[Any] = [0, 0] # all coordinates are given in format [y,x] UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1] UpperCamelCase : Dict = 1 # the cost map which pushes the path closer to the goal UpperCamelCase : Tuple = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCamelCase : str = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCamelCase : Any = 99 UpperCamelCase , UpperCamelCase : int = search(grid, init, goal, cost, heuristic) print('ACTION MAP') for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
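# A minimal sketch of the open-list bookkeeping used in the search above, but
# with heapq instead of sort()/reverse()/pop(); behaviour is equivalent (always
# expand the cell with the smallest f = g + h) while avoiding a full re-sort of
# the list on every iteration. The entries are toy values for illustration.
import heapq

open_list: list[list[int]] = []
heapq.heappush(open_list, [7, 0, 0, 0])   # [f, g, x, y]
heapq.heappush(open_list, [5, 1, 1, 0])
f, g, x, y = heapq.heappop(open_list)     # -> the entry with the smallest f
assert (f, g, x, y) == (5, 1, 1, 0)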
50
'''simple docstring'''
import warnings

from ..trainer import Trainer
from ..utils import logging

UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)

class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,_lowerCAmelCase ,)
        super().__init__(args=_lowerCAmelCase ,**_lowerCAmelCase )
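# A generic sketch of the deprecation-shim pattern this class follows: subclass
# the replacement, emit a warning on construction, and forward everything else.
# `OldTrainer`/`NewTrainer` are illustrative names, not real library classes.
import warnings


class NewTrainer:
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs


class OldTrainer(NewTrainer):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "`OldTrainer` is deprecated; use `NewTrainer` instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)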
50
1
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int=None ): lowerCamelCase__ = None if token is not None: lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} lowerCamelCase__ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' lowerCamelCase__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json() lowerCamelCase__ = {} try: job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(__lowerCAmelCase ): lowerCamelCase__ = requests.get(url + F'''&page={i + 2}''' , headers=__lowerCAmelCase ).json() job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return job_links except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict=None ): lowerCamelCase__ = None if token is not None: lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} lowerCamelCase__ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' lowerCamelCase__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json() lowerCamelCase__ = {} try: artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(__lowerCAmelCase ): lowerCamelCase__ = requests.get(url + F'''&page={i + 2}''' , headers=__lowerCAmelCase ).json() artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) return artifacts except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str ): lowerCamelCase__ = None if token is not None: lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} lowerCamelCase__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase , allow_redirects=__lowerCAmelCase ) lowerCamelCase__ = result.headers["""Location"""] lowerCamelCase__ = requests.get(__lowerCAmelCase , allow_redirects=__lowerCAmelCase ) lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''{artifact_name}.zip''' ) with open(__lowerCAmelCase , """wb""" ) as fp: fp.write(response.content ) def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[str]=None ): lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = None with zipfile.ZipFile(__lowerCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowerCAmelCase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(__lowerCAmelCase ) as f: for line in f: lowerCamelCase__ = line.decode("""UTF-8""" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs lowerCamelCase__ = line[: line.index(""": """ )] lowerCamelCase__ = line[line.index(""": """ ) + len(""": """ ) :] errors.append([error_line, error] ) except 
Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("""FAILED """ ): # `test` is the test method that failed lowerCamelCase__ = line[len("""FAILED """ ) :] failed_tests.append(__lowerCAmelCase ) elif filename == "job_name.txt": lowerCamelCase__ = line if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( F'''`errors` and `failed_tests` should have the same number of elements. Got {len(__lowerCAmelCase )} for `errors` ''' F'''and {len(__lowerCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' """ problem.""" ) lowerCamelCase__ = None if job_name and job_links: lowerCamelCase__ = job_links.get(__lowerCAmelCase , __lowerCAmelCase ) # A list with elements of the form (line of error, error, failed test) lowerCamelCase__ = [x + [y] + [job_link] for x, y in zip(__lowerCAmelCase , __lowerCAmelCase )] return result def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=None ): lowerCamelCase__ = [] lowerCamelCase__ = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for p in os.listdir(__lowerCAmelCase ) if p.endswith(""".zip""" )] for p in paths: errors.extend(get_errors_from_single_artifact(__lowerCAmelCase , job_links=__lowerCAmelCase ) ) return errors def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=None ): lowerCamelCase__ = Counter() counter.update([x[1] for x in logs] ) lowerCamelCase__ = counter.most_common() lowerCamelCase__ = {} for error, count in counts: if error_filter is None or error not in error_filter: lowerCamelCase__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]} lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowerCAmelCase : item[1]["count"] , reverse=__lowerCAmelCase ) ) return r def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = test.split("""::""" )[0] if test.startswith("""tests/models/""" ): lowerCamelCase__ = test.split("""/""" )[2] else: lowerCamelCase__ = None return test def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Any=None ): lowerCamelCase__ = [(x[0], x[1], get_model(x[2] )) for x in logs] lowerCamelCase__ = [x for x in logs if x[2] is not None] lowerCamelCase__ = {x[2] for x in logs} lowerCamelCase__ = {} for test in tests: lowerCamelCase__ = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) lowerCamelCase__ = counter.most_common() lowerCamelCase__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} lowerCamelCase__ = sum(error_counts.values() ) if n_errors > 0: lowerCamelCase__ = {"""count""": n_errors, """errors""": error_counts} lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowerCAmelCase : item[1]["count"] , reverse=__lowerCAmelCase ) ) return r def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = """| no. | error | status |""" lowerCamelCase__ = """|-:|:-|:-|""" lowerCamelCase__ = [header, sep] for error in reduced_by_error: lowerCamelCase__ = reduced_by_error[error]["""count"""] lowerCamelCase__ = F'''| {count} | {error[:100]} | |''' lines.append(__lowerCAmelCase ) return "\n".join(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = """| model | no. 
of errors | major error | count |""" lowerCamelCase__ = """|-:|-:|-:|-:|""" lowerCamelCase__ = [header, sep] for model in reduced_by_model: lowerCamelCase__ = reduced_by_model[model]["""count"""] lowerCamelCase__ , lowerCamelCase__ = list(reduced_by_model[model]["""errors"""].items() )[0] lowerCamelCase__ = F'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(__lowerCAmelCase ) return "\n".join(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') UpperCamelCase : List[Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) UpperCamelCase : int = get_job_links(args.workflow_run_id, token=args.token) UpperCamelCase : Dict = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: UpperCamelCase : int = k.find(' / ') UpperCamelCase : List[str] = k[index + len(' / ') :] UpperCamelCase : Any = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) UpperCamelCase : Dict = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) UpperCamelCase : Optional[Any] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error UpperCamelCase : Union[str, Any] = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors UpperCamelCase : Any = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) UpperCamelCase : str = reduce_by_error(errors) UpperCamelCase : Any = reduce_by_model(errors) UpperCamelCase : Optional[int] = make_github_table(reduced_by_error) UpperCamelCase : List[Any] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
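# A self-contained sketch of the error-reduction step used above: given
# (error_line, error, failed_test) triples, group by error string and count,
# most frequent first. The sample triples are made up for illustration.
from collections import Counter

logs = [
    ("test_a.py:10", "AssertionError", "tests/test_a.py::test_a"),
    ("test_b.py:22", "AssertionError", "tests/test_b.py::test_b"),
    ("test_c.py:5", "ImportError", "tests/test_c.py::test_c"),
]
counter = Counter(error for _, error, _ in logs)
reduced = {
    error: {"count": count, "failed_tests": [t for _, e, t in logs if e == error]}
    for error, count in counter.most_common()
}
assert reduced["AssertionError"]["count"] == 2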
50
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = [] for line in lines: lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase ) # remove comments if line: filtered_lines.append(__lowerCAmelCase ) lowerCamelCase__ = """\n""".join(__lowerCAmelCase ) # Make a hash from all this code lowerCamelCase__ = full_str.encode("""utf-8""" ) return shaaaa(__lowerCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCamelCase : Dict = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase : str = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCamelCase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
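# A minimal sketch of the source-hashing scheme above: strip comments and blank
# lines from a module's source, then hash what remains, so the cache key only
# changes when meaningful code changes. Function name is illustrative.
import re
from hashlib import sha256

def hash_python_lines(lines: list[str]) -> str:
    filtered = [re.sub(r"#.*", "", line) for line in lines]
    filtered = [line for line in filtered if line]
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

# comment-only and empty lines do not affect the hash
assert hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == hash_python_lines(["x = 1", "y = 2"])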
50
1
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging UpperCamelCase : Any = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['input_features', 'is_longer'] def __init__( self ,_lowerCAmelCase=64 ,_lowerCAmelCase=4_80_00 ,_lowerCAmelCase=4_80 ,_lowerCAmelCase=10 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=False ,_lowerCAmelCase = 0 ,_lowerCAmelCase = 1_40_00 ,_lowerCAmelCase = None ,_lowerCAmelCase = "fusion" ,_lowerCAmelCase = "repeatpad" ,**_lowerCAmelCase ,): super().__init__( feature_size=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,padding_value=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = top_db lowerCamelCase__ = truncation lowerCamelCase__ = padding lowerCamelCase__ = fft_window_size lowerCamelCase__ = (fft_window_size >> 1) + 1 lowerCamelCase__ = hop_length lowerCamelCase__ = max_length_s lowerCamelCase__ = max_length_s * sampling_rate lowerCamelCase__ = sampling_rate lowerCamelCase__ = frequency_min lowerCamelCase__ = frequency_max lowerCamelCase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_lowerCAmelCase ,min_frequency=_lowerCAmelCase ,max_frequency=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,norm=_lowerCAmelCase ,mel_scale="""htk""" ,) lowerCamelCase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_lowerCAmelCase ,min_frequency=_lowerCAmelCase ,max_frequency=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,norm="""slaney""" ,mel_scale="""slaney""" ,) def UpperCamelCase_ ( self ): lowerCamelCase__ = copy.deepcopy(self.__dict__ ) lowerCamelCase__ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = spectrogram( _lowerCAmelCase ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_lowerCAmelCase ,log_mel="""dB""" ,) return log_mel_spectrogram.T def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCamelCase__ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCamelCase__ = [0] # randomly choose index for each part lowerCamelCase__ = np.random.choice(ranges[0] ) lowerCamelCase__ = np.random.choice(ranges[1] ) lowerCamelCase__ = np.random.choice(ranges[2] ) lowerCamelCase__ = mel[idx_front : idx_front + chunk_frames, :] lowerCamelCase__ = mel[idx_middle : idx_middle + chunk_frames, :] lowerCamelCase__ = mel[idx_back : idx_back + chunk_frames, :] lowerCamelCase__ = torch.tensor(mel[None, None, :] ) lowerCamelCase__ = torch.nn.functional.interpolate( _lowerCAmelCase ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_lowerCAmelCase ) lowerCamelCase__ = mel_shrink[0][0].numpy() lowerCamelCase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, 
mel_chunk_back] ,axis=0 ) return mel_fusion def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowerCamelCase__ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCamelCase__ = len(_lowerCAmelCase ) - max_length lowerCamelCase__ = np.random.randint(0 ,overflow + 1 ) lowerCamelCase__ = waveform[idx : idx + max_length] lowerCamelCase__ = self._np_extract_fbank_features(_lowerCAmelCase ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCamelCase__ = self._np_extract_fbank_features(_lowerCAmelCase ,self.mel_filters ) lowerCamelCase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCamelCase__ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCamelCase__ = np.stack([mel, mel, mel, mel] ,axis=0 ) lowerCamelCase__ = False else: lowerCamelCase__ = self._random_mel_fusion(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: lowerCamelCase__ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCamelCase__ = int(max_length / len(_lowerCAmelCase ) ) lowerCamelCase__ = np.stack(np.tile(_lowerCAmelCase ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCamelCase__ = int(max_length / len(_lowerCAmelCase ) ) lowerCamelCase__ = np.stack(np.tile(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = np.pad(_lowerCAmelCase ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": lowerCamelCase__ = self._np_extract_fbank_features(_lowerCAmelCase ,self.mel_filters ) lowerCamelCase__ = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: lowerCamelCase__ = self._np_extract_fbank_features(_lowerCAmelCase ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): lowerCamelCase__ = truncation if truncation is not None else self.truncation lowerCamelCase__ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase__ = isinstance(_lowerCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowerCamelCase__ = is_batched_numpy or ( isinstance(_lowerCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCAmelCase ,np.ndarray ): lowerCamelCase__ = np.asarray(_lowerCAmelCase ,dtype=np.floataa ) elif isinstance(_lowerCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ = [np.asarray(_lowerCAmelCase )] # convert to mel spectrogram, truncate and pad if needed. lowerCamelCase__ = [ self._get_input_mel(_lowerCAmelCase ,max_length if max_length else self.nb_max_samples ,_lowerCAmelCase ,_lowerCAmelCase ) for waveform in raw_speech ] lowerCamelCase__ = [] lowerCamelCase__ = [] for mel, longer in padded_inputs: input_mel.append(_lowerCAmelCase ) is_longer.append(_lowerCAmelCase ) if truncation == "fusion" and sum(_lowerCAmelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCamelCase__ = np.random.randint(0 ,len(_lowerCAmelCase ) ) lowerCamelCase__ = True if isinstance(input_mel[0] ,_lowerCAmelCase ): lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCamelCase__ = [[longer] for longer in is_longer] lowerCamelCase__ = {"""input_features""": input_mel, """is_longer""": is_longer} lowerCamelCase__ = BatchFeature(_lowerCAmelCase ) if return_tensors is not None: lowerCamelCase__ = input_features.convert_to_tensors(_lowerCAmelCase ) return input_features
50
'''simple docstring'''
import operator

def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ):
    lowerCamelCase__ = operator.lt if reverse else operator.gt
    lowerCamelCase__ = solution or []
    if not arr:
        return solution

    lowerCamelCase__ = [arr.pop(0 )]
    for i, item in enumerate(__lowerCAmelCase ):
        if _operator(__lowerCAmelCase , sublist[-1] ):
            sublist.append(__lowerCAmelCase )
            arr.pop(__lowerCAmelCase )

    # merging sublist into solution list
    if not solution:
        solution.extend(__lowerCAmelCase )
    else:
        while sublist:
            lowerCamelCase__ = sublist.pop(0 )
            for i, xx in enumerate(__lowerCAmelCase ):
                if not _operator(__lowerCAmelCase , __lowerCAmelCase ):
                    solution.insert(__lowerCAmelCase , __lowerCAmelCase )
                    break
            else:
                solution.append(__lowerCAmelCase )

    strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    return solution

if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
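# A readable reference sketch of one strand-sort pass, mirroring the logic
# above with explicit names: peel off an increasing "strand" from the input,
# then merge it into the already-sorted solution. Ascending order only, for
# brevity; the function name is illustrative.
def strand_sort_ref(arr: list[int]) -> list[int]:
    solution: list[int] = []
    while arr:
        sublist = [arr.pop(0)]
        i = 0
        while i < len(arr):
            if arr[i] > sublist[-1]:           # extend the strand
                sublist.append(arr.pop(i))
            else:
                i += 1
        # merge the strand into the (sorted) solution
        merged: list[int] = []
        while solution and sublist:
            merged.append(solution.pop(0) if solution[0] <= sublist[0] else sublist.pop(0))
        solution = merged + solution + sublist
    return solution

assert strand_sort_ref([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]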
50
1
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) UpperCamelCase : Optional[int] = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ): lowerCamelCase__ = None lowerCamelCase__ = os.path.abspath(os.path.join("""examples""" ,"""by_feature""" ) ) lowerCamelCase__ = os.path.abspath("""examples""" ) for item in os.listdir(_lowerCAmelCase ): if item not in EXCLUDE_EXAMPLES: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,_lowerCAmelCase ) if os.path.isfile(_lowerCAmelCase ) and ".py" in item_path: with self.subTest( tested_script=_lowerCAmelCase ,feature_script=_lowerCAmelCase ,tested_section="""main()""" if parser_only else """training_function()""" ,): lowerCamelCase__ = compare_against_test( os.path.join(_lowerCAmelCase ,_lowerCAmelCase ) ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = """\n""".join(_lowerCAmelCase ) if special_strings is not None: for string in special_strings: lowerCamelCase__ = diff.replace(_lowerCAmelCase ,"""""" ) self.assertEqual(_lowerCAmelCase ,"""""" ) def UpperCamelCase_ ( self ): self.one_complete_example("""complete_nlp_example.py""" ,_lowerCAmelCase ) self.one_complete_example("""complete_nlp_example.py""" ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = os.path.abspath(os.path.join("""examples""" ,"""cv_example.py""" ) ) lowerCamelCase__ = [ """ """ * 16 + """{\n\n""", """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""", """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""", """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""", """ """ * 20 + """\"epoch\": epoch,\n\n""", """ """ * 16 + """},\n\n""", """ """ * 16 + """step=epoch,\n""", """ """ * 12, """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""", ] self.one_complete_example("""complete_cv_example.py""" ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) self.one_complete_example("""complete_cv_example.py""" ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) @mock.patch.dict(os.environ ,{'TESTING_MOCKED_DATALOADERS': '1'} ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = False @classmethod def UpperCamelCase_ ( cls ): super().setUpClass() lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = os.path.join(cls._tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) lowerCamelCase__ = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def UpperCamelCase_ ( cls ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def UpperCamelCase_ ( self ): lowerCamelCase__ = F''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() 
run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"""epoch_0""" ) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = F''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() lowerCamelCase__ = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"""step_2""" ) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir ,"epoch_0" )} '''.split() lowerCamelCase__ = run_command(self._launch_args + testargs ,return_stdout=_lowerCAmelCase ) self.assertNotIn("""epoch 0:""" ,_lowerCAmelCase ) self.assertIn("""epoch 1:""" ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir ,"step_2" )} '''.split() lowerCamelCase__ = run_command(self._launch_args + testargs ,return_stdout=_lowerCAmelCase ) if torch.cuda.is_available(): lowerCamelCase__ = torch.cuda.device_count() else: lowerCamelCase__ = 1 if num_processes > 1: self.assertNotIn("""epoch 0:""" ,_lowerCAmelCase ) self.assertIn("""epoch 1:""" ,_lowerCAmelCase ) else: self.assertIn("""epoch 0:""" ,_lowerCAmelCase ) self.assertIn("""epoch 1:""" ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = """ examples/by_feature/cross_validation.py --num_folds 2 """.split() with mock.patch.dict(os.environ ,{"""TESTING_MOCKED_DATALOADERS""": """0"""} ): lowerCamelCase__ = run_command(self._launch_args + testargs ,return_stdout=_lowerCAmelCase ) lowerCamelCase__ = re.findall("""({.+})""" ,_lowerCAmelCase ) lowerCamelCase__ = [r for r in results if """accuracy""" in r][-1] lowerCamelCase__ = ast.literal_eval(_lowerCAmelCase ) self.assertGreaterEqual(results["""accuracy"""] ,0.75 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ["""examples/by_feature/multi_process_metrics.py"""] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def UpperCamelCase_ ( self ): with tempfile.TemporaryDirectory() as tmpdir: lowerCamelCase__ = F''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase ,"""tracking""" ) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ["""examples/by_feature/gradient_accumulation.py"""] run_command(self._launch_args + testargs ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ["""examples/by_feature/local_sgd.py"""] run_command(self._launch_args + testargs )
50
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

def A__ ( __lowerCAmelCase : dict ):
    return (data["data"], data["target"])

def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ):
    lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
    # Predict target for test data
    lowerCamelCase__ = xgb.predict(__lowerCAmelCase )
    lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 )
    return predictions

def A__ ( ):
    lowerCamelCase__ = fetch_california_housing()
    lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase )
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split(
        __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
    lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
    print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' )

if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
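# A minimal end-to-end sketch of the regression pipeline above, but on
# synthetic data so it runs without downloading the California housing set;
# requires scikit-learn and xgboost to be installed.
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))
y = X @ np.array([1.0, -2.0, 0.5, 0.0]) + rng.normal(scale=0.1, size=200)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
model = XGBRegressor(verbosity=0, random_state=42)
model.fit(X_train, y_train)
print("MAE:", mean_absolute_error(y_test, model.predict(X_test)))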
50
1
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = """ZinengTang/tvlt-base""" lowerCamelCase__ = tempfile.mkdtemp() def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return TvltImageProcessor.from_pretrained(self.checkpoint ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return TvltFeatureExtractor.from_pretrained(self.checkpoint ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_feature_extractor() lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor ,_lowerCAmelCase ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_feature_extractor() lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ) lowerCamelCase__ = np.ones([1_20_00] ) lowerCamelCase__ = feature_extractor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(audio=_lowerCAmelCase ,return_tensors="""np""" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_feature_extractor() lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ) lowerCamelCase__ = np.ones([3, 2_24, 2_24] ) lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_feature_extractor() lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ) lowerCamelCase__ = np.ones([1_20_00] ) lowerCamelCase__ = np.ones([3, 2_24, 2_24] ) lowerCamelCase__ = processor(audio=_lowerCAmelCase ,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_feature_extractor() lowerCamelCase__ = TvltProcessor(image_processor=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ) self.assertListEqual( processor.model_input_names ,image_processor.model_input_names + feature_extractor.model_input_names ,msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" ,)
50
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
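# A plain-numpy sketch of what the top-k warper exercised in these tests does:
# keep the k largest logits per row and set everything else to a filter value
# (here -inf). Tie handling at the threshold is simplified for illustration.
import numpy as np

def top_k_filter(scores: np.ndarray, k: int, filter_value: float = -np.inf) -> np.ndarray:
    # threshold = k-th largest value in each row
    thresholds = np.sort(scores, axis=-1)[:, -k][:, None]
    return np.where(scores >= thresholds, scores, filter_value)

scores = np.array([[0.1, 0.9, 0.3, 0.5]])
filtered = top_k_filter(scores, k=2)
assert np.isneginf(filtered).sum() == 2          # two tokens filtered out
assert filtered[0, 1] == 0.9 and filtered[0, 3] == 0.5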
50
1
'''simple docstring''' import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict UpperCamelCase : List[Any] = namedtuple( '_TestCommandArgs', [ 'dataset', 'name', 'cache_dir', 'data_dir', 'all_configs', 'save_infos', 'ignore_verifications', 'force_redownload', 'clear_cache', ], defaults=[None, None, None, False, False, False, False, False], ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ): return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ = _TestCommandArgs(dataset=__lowerCAmelCase , all_configs=__lowerCAmelCase , save_infos=__lowerCAmelCase ) lowerCamelCase__ = TestCommand(*__lowerCAmelCase ) test_command.run() lowerCamelCase__ = os.path.join(__lowerCAmelCase , """README.md""" ) assert os.path.exists(__lowerCAmelCase ) lowerCamelCase__ = DatasetInfosDict.from_directory(__lowerCAmelCase ) lowerCamelCase__ = DatasetInfosDict( { """default""": DatasetInfo( features=Features( { """tokens""": Sequence(Value("""string""" ) ), """ner_tags""": Sequence( ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ), """langs""": Sequence(Value("""string""" ) ), """spans""": Sequence(Value("""string""" ) ), } ) , splits=[ { """name""": """train""", """num_bytes""": 235_1563, """num_examples""": 1_0000, }, { """name""": """validation""", """num_bytes""": 23_8418, """num_examples""": 1000, }, ] , download_size=394_0680 , dataset_size=258_9981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos["""default"""] , __lowerCAmelCase ), getattr(expected_dataset_infos["""default"""] , __lowerCAmelCase ) if key == "num_bytes": assert is_apercent_close(__lowerCAmelCase , __lowerCAmelCase ) elif key == "splits": assert list(__lowerCAmelCase ) == list(__lowerCAmelCase ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
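The num_bytes comparison in the test above is a 1% relative-tolerance check; written out standalone it is essentially math.isclose with rel_tol=0.01 for positive targets (the function name here is descriptive, not from the source):

import math

def is_1percent_close(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01

assert is_1percent_close(2351563, 2355000)
assert math.isclose(2351563, 2355000, rel_tol=0.01)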
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def A__ ( __lowerCAmelCase : Namespace ): return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) UpperCamelCase : Dict = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class UpperCamelCase__ (a ): '''simple docstring''' @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = parser.add_parser( """convert""" ,help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" ,) train_parser.add_argument("""--model_type""" ,type=_lowerCAmelCase ,required=_lowerCAmelCase ,help="""Model's type.""" ) train_parser.add_argument( """--tf_checkpoint""" ,type=_lowerCAmelCase ,required=_lowerCAmelCase ,help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""" ,type=_lowerCAmelCase ,required=_lowerCAmelCase ,help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""" ,type=_lowerCAmelCase ,default="""""" ,help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""" ,type=_lowerCAmelCase ,default=_lowerCAmelCase ,help="""Optional fine-tuning task name if the TF model was a finetuned model.""" ,) train_parser.set_defaults(func=_lowerCAmelCase ) def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,*_lowerCAmelCase ,): lowerCamelCase__ = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(F'''Loading model {model_type}''' ) lowerCamelCase__ = model_type lowerCamelCase__ = tf_checkpoint lowerCamelCase__ = pytorch_dump_output lowerCamelCase__ = config lowerCamelCase__ = finetuning_task_name def UpperCamelCase_ ( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif 
self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) if "ckpt" in self._tf_checkpoint.lower(): lowerCamelCase__ = self._tf_checkpoint lowerCamelCase__ = """""" else: lowerCamelCase__ = self._tf_checkpoint lowerCamelCase__ = """""" convert_transfo_xl_checkpoint_to_pytorch( _lowerCAmelCase ,self._config ,self._pytorch_dump_output ,_lowerCAmelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
50
'''simple docstring''' def A__ ( numa : int , numb : int ): return numa ^ numb < 0 if __name__ == "__main__": import doctest doctest.testmod()
50
1
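The XOR check above works because, in two's complement, a ^ b has its sign bit set exactly when the two operands' sign bits differ; a quick check:

assert (3 ^ -5) < 0        # opposite signs -> XOR is negative
assert not (3 ^ 5) < 0     # both positive
assert not (-3 ^ -5) < 0   # both negative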
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=16 ,_lowerCAmelCase=[1, 2, 1] ,_lowerCAmelCase=[2, 2, 4] ,_lowerCAmelCase=2 ,_lowerCAmelCase=2.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=10 ,_lowerCAmelCase=8 ,_lowerCAmelCase=["stage1", "stage2", "stage3"] ,_lowerCAmelCase=[1, 2, 3] ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = depths lowerCamelCase__ = num_heads lowerCamelCase__ = window_size lowerCamelCase__ = mlp_ratio lowerCamelCase__ = qkv_bias lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = hidden_act lowerCamelCase__ = use_absolute_embeddings lowerCamelCase__ = patch_norm lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = initializer_range lowerCamelCase__ = is_training lowerCamelCase__ = scope lowerCamelCase__ = use_labels lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = encoder_stride lowerCamelCase__ = out_features lowerCamelCase__ = out_indices def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return MaskFormerSwinConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = MaskFormerSwinModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) lowerCamelCase__ = ((config.image_size // 
config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = MaskFormerSwinBackbone(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,[16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_lowerCAmelCase ): lowerCamelCase__ = ["""stem"""] lowerCamelCase__ = MaskFormerSwinBackbone(config=_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _UpperCamelCase = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = MaskFormerSwinModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self ): return def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCAmelCase ) @unittest.skip("""Swin does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skip("""Swin does not support feedforward chunking""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,nn.Linear ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.hidden_states lowerCamelCase__ = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) # Swin has a different seq_length lowerCamelCase__ = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = 3 lowerCamelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase__ = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,(padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is 
replaced by native Swin""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_lowerCAmelCase ): lowerCamelCase__ = 0 return t def check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase={} ): with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ,return_dict=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,return_dict=_lowerCAmelCase ,**_lowerCAmelCase ).to_tuple() def recursive_check(_lowerCAmelCase ,_lowerCAmelCase ): if isinstance(_lowerCAmelCase ,(List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase ,_lowerCAmelCase ): recursive_check(_lowerCAmelCase ,_lowerCAmelCase ) elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() ,dict_object.values() ): recursive_check(_lowerCAmelCase ,_lowerCAmelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_lowerCAmelCase ) ,set_nan_tensor_to_zero(_lowerCAmelCase ) ,atol=1E-5 ) ,msg=( """Tuple and dict output are not equal. Difference:""" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}. Dict has''' F''' `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}.''' ) ,) recursive_check(_lowerCAmelCase ,_lowerCAmelCase ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,{"""output_hidden_states""": True} ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,{"""output_hidden_states""": True} ) @require_torch class UpperCamelCase__ (unittest.TestCase ,a ): '''simple docstring''' _UpperCamelCase = (MaskFormerSwinBackbone,) if is_torch_available() else () _UpperCamelCase = MaskFormerSwinConfig def UpperCamelCase_ ( self ): lowerCamelCase__ = MaskFormerSwinModelTester(self ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: lowerCamelCase__ = backbone_class(_lowerCAmelCase ) backbone.to(_lowerCAmelCase ) backbone.eval() lowerCamelCase__ = backbone(**_lowerCAmelCase ) # Test default outputs and verify feature maps 
self.assertIsInstance(outputs.feature_maps ,_lowerCAmelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ): self.assertTrue(feature_map.shape[:2] ,(batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowerCamelCase__ = backbone(**_lowerCAmelCase ,output_hidden_states=_lowerCAmelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) ,len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) ,(batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowerCamelCase__ = backbone(**_lowerCAmelCase ,output_attentions=_lowerCAmelCase ) self.assertIsNotNone(outputs.attentions )
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
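A quick sanity check of the shape arithmetic in create_and_check_model above, plugged with the tester defaults (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):

image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
assert (expected_seq_len, expected_dim) == (16, 64)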
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase : str = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['audio_values', 'audio_mask'] def __init__( self ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=1 ,_lowerCAmelCase=[16, 16] ,_lowerCAmelCase=1_28 ,_lowerCAmelCase=4_41_00 ,_lowerCAmelCase=86 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=0.0 ,**_lowerCAmelCase ,): super().__init__( feature_size=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,padding_value=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = spectrogram_length lowerCamelCase__ = num_channels lowerCamelCase__ = patch_size lowerCamelCase__ = feature_size // self.patch_size[1] lowerCamelCase__ = n_fft lowerCamelCase__ = sampling_rate // hop_length_to_sampling_rate lowerCamelCase__ = sampling_rate lowerCamelCase__ = padding_value lowerCamelCase__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=_lowerCAmelCase ,min_frequency=0.0 ,max_frequency=2_2050.0 ,sampling_rate=_lowerCAmelCase ,norm="""slaney""" ,mel_scale="""slaney""" ,).T def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = spectrogram( _lowerCAmelCase ,window_function(self.n_fft ,"""hann""" ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters.T ,log_mel="""dB""" ,db_range=80.0 ,) lowerCamelCase__ = log_spec[:, :-1] lowerCamelCase__ = log_spec - 20.0 lowerCamelCase__ = np.clip(log_spec / 40.0 ,-2.0 ,0.0 ) + 1.0 return log_spec def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,**_lowerCAmelCase ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase__ = isinstance(_lowerCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowerCamelCase__ = is_batched_numpy or ( isinstance(_lowerCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_lowerCAmelCase ,np.ndarray ): lowerCamelCase__ = np.asarray(_lowerCAmelCase ,dtype=np.floataa ) elif isinstance(_lowerCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] ,_lowerCAmelCase ): lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase__ = np.array(_lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase__ = np.ones([len(_lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase__ = padded_audio_features * self.padding_value for i in range(len(_lowerCAmelCase ) ): lowerCamelCase__ = audio_features[i] lowerCamelCase__ = feature # return as BatchFeature if return_attention_mask: lowerCamelCase__ = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase__ = {"""audio_values""": padded_audio_features} lowerCamelCase__ = BatchFeature(data=_lowerCAmelCase ,tensor_type=_lowerCAmelCase ) return encoded_inputs
50
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase : int = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
50
1
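The normalization at the end of _np_extract_fbank_features above shifts the dB spectrogram by -20 and squashes it into [-1, 1]; a small numpy check of the endpoints:

import numpy as np

log_spec = np.array([-60.0, -20.0, 20.0])  # dB values before normalization
out = np.clip((log_spec - 20.0) / 40.0, -2.0, 0.0) + 1.0
assert np.allclose(out, [-1.0, 0.0, 1.0])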
'''simple docstring''' from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): return 0.0 def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int ): lowerCamelCase__ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) lowerCamelCase__ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def A__ ( __lowerCAmelCase : FilterType , __lowerCAmelCase : int ): lowerCamelCase__ = 512 lowerCamelCase__ = [1] + [0] * (size - 1) lowerCamelCase__ = [filter_type.process(__lowerCAmelCase ) for item in inputs] lowerCamelCase__ = [0] * (samplerate - size) # zero-padding outputs += filler lowerCamelCase__ = np.abs(np.fft.fft(__lowerCAmelCase ) ) lowerCamelCase__ = 20 * np.logaa(__lowerCAmelCase ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) # Display within reasonable bounds lowerCamelCase__ = get_bounds(__lowerCAmelCase , __lowerCAmelCase ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("""Gain (dB)""" ) plt.plot(__lowerCAmelCase ) plt.show() def A__ ( __lowerCAmelCase : FilterType , __lowerCAmelCase : int ): lowerCamelCase__ = 512 lowerCamelCase__ = [1] + [0] * (size - 1) lowerCamelCase__ = [filter_type.process(__lowerCAmelCase ) for item in inputs] lowerCamelCase__ = [0] * (samplerate - size) # zero-padding outputs += filler lowerCamelCase__ = np.angle(np.fft.fft(__lowerCAmelCase ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("""Phase shift (Radians)""" ) plt.plot(np.unwrap(__lowerCAmelCase , -2 * pi ) ) plt.show()
50
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
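Both plotting helpers above rely on the same trick: drive the filter with a unit impulse and FFT the output to read off its response. A self-contained illustration with a 4-tap moving average standing in for the document's FilterType protocol (the filter is a toy, not from the source):

import numpy as np

def moving_average(signal, k=4):
    # A toy low-pass filter: average of the last k samples.
    out, buf = [], [0.0] * k
    for sample in signal:
        buf = buf[1:] + [sample]
        out.append(sum(buf) / k)
    return out

impulse = [1.0] + [0.0] * 511
response = moving_average(impulse)
gain_db = 20 * np.log10(np.abs(np.fft.fft(response)) + 1e-12)
assert gain_db[0] > gain_db[128]  # low-pass: DC gain beats the mid band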
'''simple docstring''' import math from collections.abc import Callable def intersection(function : Callable[[float], float] , x_a : float , x_b : float ) -> float: x_n = x_a x_na = x_b while True: if x_n == x_na or function(x_n ) == function(x_na ): raise ZeroDivisionError("""float division by zero, could not find root""" ) x_nb = x_na - ( function(x_na ) / ((function(x_na ) - function(x_n )) / (x_na - x_n)) ) if abs(x_nb - x_na ) < 10**-5: return x_nb x_n = x_na x_na = x_nb def f(x : float ) -> float: return math.pow(x , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
50
'''simple docstring''' from PIL import Image def change_brightness(img : Image , level : float ): def brightness(c : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(brightness ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 brigt_img = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
50
1
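A convergence check for the secant iteration above: the real root of x**3 - 2x - 5 is x ≈ 2.0945515, and a handful of steps from the same start values already pins it down:

def f(x):
    return x**3 - 2 * x - 5

x0, x1 = 3.0, 3.5
for _ in range(6):
    x0, x1 = x1, x1 - f(x1) * (x1 - x0) / (f(x1) - f(x0))
assert abs(x1 - 2.0945514815423265) < 1e-6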
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : str = logging.get_logger(__name__) UpperCamelCase : str = { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json' ), } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'dpr' def __init__( self ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase = 0 ,**_lowerCAmelCase ,): super().__init__(pad_token_id=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = hidden_act lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = projection_dim lowerCamelCase__ = position_embedding_type
50
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
50
1
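find_negative_index above is a hand-rolled binary search over a row sorted in decreasing order; searching the negated row with bisect reaches the same boundary:

from bisect import bisect_right

row = [4, 3, 2, -1]  # decreasing, as in the grid
negatives_start = bisect_right([-x for x in row], 0)
assert negatives_start == 3
assert all(value < 0 for value in row[negatives_start:])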
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
50
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
50
1
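The REPLACE_PATTERNS machinery above is plain regex search-and-replace; here is the "init" pattern exercised standalone, with a made-up version string for illustration:

import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
text = '__version__ = "4.30.0.dev0"'
assert pattern.search(text).groups()[0] == "4.30.0.dev0"
assert pattern.sub('__version__ = "4.30.0"', text) == '__version__ = "4.30.0"'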
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ): assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict ): lowerCamelCase__ = tmp_path / """cache""" lowerCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ): lowerCamelCase__ = tmp_path / """cache""" lowerCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowerCamelCase__ = features.copy() if features else default_expected_features lowerCamelCase__ = ( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = tmp_path / """cache""" lowerCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowerCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ): if issubclass(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = parquet_path elif issubclass(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [parquet_path] lowerCamelCase__ = tmp_path / """cache""" lowerCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowerCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() 
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any=("train",) ): assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for split in splits: lowerCamelCase__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Any ): lowerCamelCase__ = tmp_path / """cache""" lowerCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__ = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = tmp_path / """cache""" lowerCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowerCamelCase__ = features.copy() if features else default_expected_features lowerCamelCase__ = ( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__ = ParquetDatasetReader({"""train""": parquet_path} , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ): if split: lowerCamelCase__ = {split: parquet_path} else: lowerCamelCase__ = """train""" lowerCamelCase__ = {"""train""": parquet_path, """test""": parquet_path} lowerCamelCase__ = tmp_path / """cache""" lowerCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowerCamelCase__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Any ): lowerCamelCase__ = ParquetDatasetWriter(__lowerCAmelCase , tmp_path / """foo.parquet""" ) assert writer.write() > 0 lowerCamelCase__ = pq.ParquetFile(tmp_path / """foo.parquet""" ) lowerCamelCase__ = pf.read() assert dataset.data.table == output_table def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ = str(shared_datadir / """test_image_rgb.jpg""" ) lowerCamelCase__ = {"""image""": [image_path]} lowerCamelCase__ = Features({"""image""": Image()} ) lowerCamelCase__ = Dataset.from_dict(__lowerCAmelCase , features=__lowerCAmelCase ) 
lowerCamelCase__ = ParquetDatasetWriter(__lowerCAmelCase , tmp_path / """foo.parquet""" ) assert writer.write() > 0 lowerCamelCase__ = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__ = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : int ): assert get_writer_batch_size(__lowerCAmelCase ) == expected
50
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
1
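The row above builds BERT-style special-token layouts for a fast tokenizer; here is a minimal, dependency-free sketch of that layout and the matching segment ids. The token ids for [CLS] and [SEP] are hypothetical placeholders.

CLS_ID, SEP_ID = 101, 102  # hypothetical vocabulary ids for [CLS] and [SEP]

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    # single sequence: [CLS] A [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
    output = [CLS_ID] + ids_a + [SEP_ID]
    if ids_b:
        output += ids_b + [SEP_ID]
    return output

def create_token_type_ids(ids_a, ids_b=None):
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

assert build_inputs_with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert create_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]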
'''simple docstring'''


def A__ ( __lowerCAmelCase : list[int] ) -> int:
    """Return the maximum product over any contiguous subarray."""
    if not __lowerCAmelCase:
        return 0
    if not isinstance(__lowerCAmelCase, (list, tuple)) or not all(
        isinstance(number, int) for number in __lowerCAmelCase
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = __lowerCAmelCase[0]
    for i in range(1, len(__lowerCAmelCase)):
        # update the maximum and minimum subarray products
        number = __lowerCAmelCase[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
50
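As a quick usage check (assuming the routine above is in scope under the name A__), the classic test cases behave as expected:

assert A__([2, 3, -2, 4]) == 6    # best subarray is [2, 3]
assert A__([-2, 0, -1]) == 0      # the zero splits the negatives
assert A__([-2, -3, 4]) == 24     # the two negatives cancel out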
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
50
1
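A self-contained sketch of the two core ideas in the row above: the CJK code-point test, and marking subword continuations with "##" so a whole Chinese word can be masked together. The code-point ranges below are a subset of the CJK Unified Ideographs blocks, and the word set in the demo is illustrative.

def _is_chinese_char(cp: int) -> bool:
    return (
        (0x4E00 <= cp <= 0x9FFF)
        or (0x3400 <= cp <= 0x4DBF)
        or (0x20000 <= cp <= 0x2A6DF)
        or (0xF900 <= cp <= 0xFAFF)
    )

def add_sub_symbol(bert_word: list[str], chinese_words: set[str]) -> list[str]:
    # greedily match the longest known word and prefix its tail chars with "##"
    max_len = max((len(w) for w in chinese_words), default=0)
    start = 0
    while start < len(bert_word):
        matched = False
        if bert_word[start] and _is_chinese_char(ord(bert_word[start][0])):
            for i in range(min(len(bert_word) - start, max_len), 1, -1):
                if "".join(bert_word[start : start + i]) in chinese_words:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start += i
                    matched = True
                    break
        if not matched:
            start += 1
    return bert_word

print(add_sub_symbol(["身", "高", "很", "高"], {"身高"}))  # ['身', '##高', '很', '高']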
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCamelCase : Dict = logging.get_logger(__name__) enable_full_determinism() class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = UNetaDModel _UpperCamelCase = 'sample' @property def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = (32, 32) lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] ).to(_lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase_ ( self ): return (3, 32, 32) @property def UpperCamelCase_ ( self ): return (3, 32, 32) def UpperCamelCase_ ( self ): lowerCamelCase__ = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } lowerCamelCase__ = self.dummy_input return init_dict, inputs_dict class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = UNetaDModel _UpperCamelCase = 'sample' @property def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 4 lowerCamelCase__ = (32, 32) lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] ).to(_lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase_ ( self ): return (4, 32, 32) @property def UpperCamelCase_ ( self ): return (4, 32, 32) def UpperCamelCase_ ( self ): lowerCamelCase__ = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } lowerCamelCase__ = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(_lowerCAmelCase ) lowerCamelCase__ = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" ,"""This test is supposed to run on GPU""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ) model.to(_lowerCAmelCase ) lowerCamelCase__ = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" ,"""This test is supposed to run on GPU""" ) def UpperCamelCase_ ( self ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ) model_accelerate.to(_lowerCAmelCase ) model_accelerate.eval() lowerCamelCase__ = 
torch.randn( 1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,) lowerCamelCase__ = noise.to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(_lowerCAmelCase ) lowerCamelCase__ = model_accelerate(_lowerCAmelCase ,_lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ,low_cpu_mem_usage=_lowerCAmelCase ) model_normal_load.to(_lowerCAmelCase ) model_normal_load.eval() lowerCamelCase__ = model_normal_load(_lowerCAmelCase ,_lowerCAmelCase )["""sample"""] assert torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-3 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(_lowerCAmelCase ) lowerCamelCase__ = torch.randn( 1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,) lowerCamelCase__ = noise.to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(_lowerCAmelCase ) with torch.no_grad(): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ).sample lowerCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCamelCase__ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] ) # fmt: on self.assertTrue(torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-3 ) ) class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = UNetaDModel _UpperCamelCase = 'sample' @property def UpperCamelCase_ ( self ,_lowerCAmelCase=(32, 32) ): lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=_lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase_ ( self ): return (3, 32, 32) @property def UpperCamelCase_ ( self ): return (3, 32, 32) def UpperCamelCase_ ( self ): lowerCamelCase__ = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1E-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } lowerCamelCase__ = self.dummy_input return init_dict, inputs_dict @slow def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ,output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(_lowerCAmelCase ) lowerCamelCase__ = self.dummy_input lowerCamelCase__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(_lowerCAmelCase ) lowerCamelCase__ = noise lowerCamelCase__ = model(**_lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" 
) model.to(_lowerCAmelCase ) lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = (2_56, 2_56) lowerCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor(batch_size * [1E-4] ).to(_lowerCAmelCase ) with torch.no_grad(): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ).sample lowerCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCamelCase__ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] ) # fmt: on self.assertTrue(torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-2 ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(_lowerCAmelCase ) lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = (32, 32) lowerCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor(batch_size * [1E-4] ).to(_lowerCAmelCase ) with torch.no_grad(): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ).sample lowerCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCamelCase__ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] ) # fmt: on self.assertTrue(torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-2 ) ) def UpperCamelCase_ ( self ): # not required for this model pass
50
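For reference, a minimal smoke test mirroring the dummy config used in the tests above. This assumes the row's UNetaDModel stands for diffusers' UNet2DModel and that the diffusers API matches the version these tests target.

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(4, 3, 32, 32)    # (batch, channels, height, width)
timestep = torch.tensor([10])
out = model(noise, timestep).sample  # predicted residual, same shape as input
assert out.shape == noise.shape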
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" 
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
50
1
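The conversion script above is essentially a long key-renaming pass over a state dict. The following illustrative helper shows the pattern with a tiny hypothetical subset of the rename rules; the real script applies many more.

def rename_key(name: str) -> str:
    rules = [
        ("pretrained.model", "dpt.encoder"),
        ("attn.proj", "attention.output.dense"),
        ("mlp.fc1", "intermediate.dense"),
    ]
    for old, new in rules:
        name = name.replace(old, new)
    return name

def convert_state_dict(state_dict: dict) -> dict:
    # pop each key, rewrite it, and write the value back under the new name
    converted = {}
    for key in list(state_dict):
        converted[rename_key(key)] = state_dict.pop(key)
    return converted

sd = {"pretrained.model.blocks.0.attn.proj.weight": "W"}
print(convert_state_dict(sd))
# {'dpt.encoder.blocks.0.attention.output.dense.weight': 'W'}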
'''simple docstring'''
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    # pull the next monotone strand off the front of arr
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
50
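Tracing the strands the routine above pulls out of [4, 3, 5, 1, 2] makes the merge behavior concrete (assuming strand_sort is in scope):

# pass 1: strand [4, 5]   remaining [3, 1, 2]   -> solution [4, 5]
# pass 2: strand [3]      remaining [1, 2]      -> merged solution [3, 4, 5]
# pass 3: strand [1, 2]   remaining []          -> merged solution [1, 2, 3, 4, 5]
print(strand_sort([4, 3, 5, 1, 2]))  # [1, 2, 3, 4, 5]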
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
50
1
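The module above defers its heavy imports through transformers' _LazyModule. The following stripped-down sketch (not the real implementation) shows the idea: attribute access triggers the actual import and caches the result, so importing the package itself stays cheap.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        # called only when normal attribute lookup fails
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups are plain attribute hits
        return value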
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def UpperCamelCase_ ( self ): lowerCamelCase__ = self.dummy_uncond_unet lowerCamelCase__ = ScoreSdeVeScheduler() lowerCamelCase__ = ScoreSdeVePipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_lowerCAmelCase ).images lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_lowerCAmelCase ,return_dict=_lowerCAmelCase )[ 0 ] lowerCamelCase__ = image[0, -3:, -3:, -1] lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = """google/ncsnpp-church-256""" lowerCamelCase__ = UNetaDModel.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = ScoreSdeVeScheduler.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = ScoreSdeVePipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=10 ,output_type="""numpy""" ,generator=_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) lowerCamelCase__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
50
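A minimal end-to-end sketch of the pipeline under test, assuming the row's UNetaDModel and ScoreSdeVePipeline correspond to diffusers' UNet2DModel and ScoreSdeVePipeline and that the tiny config above is valid for them; the step count is deliberately small.

import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = ScoreSdeVePipeline(unet=unet, scheduler=ScoreSdeVeScheduler())
images = pipe(num_inference_steps=2, output_type="numpy",
              generator=torch.manual_seed(0)).images
print(images.shape)  # (1, 32, 32, 3)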
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
50
1
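The config above leans on PretrainedConfig's attribute_map so canonical names like hidden_size resolve to model-specific fields like n_embd. A tiny self-contained sketch of that indirection (TinyConfig and its defaults are illustrative):

class TinyConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=4096, n_layer=28):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # reached only when normal lookup fails, e.g. for mapped aliases
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = TinyConfig()
assert cfg.hidden_size == cfg.n_embd == 4096
assert cfg.num_hidden_layers == 28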