Dataset schema (five fields per row):

    code                     string  (82 to 54.1k characters)
    code_codestyle           int64   (0 to 699)
    style_context            string  (111 to 35.6k characters)
    style_context_codestyle  int64   (0 to 699)
    label                    int64   (0 to 1)

The example rows below are shown one field at a time, in the order above; the long string fields contain complete Python files.
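A minimal sketch of reading rows with this schema through the `datasets` library. The repository id "user/code-style-pairs" is a hypothetical placeholder, since the dump does not name the dataset:

    from datasets import load_dataset

    # Hypothetical repository id; substitute the real one for this dump.
    ds = load_dataset("user/code-style-pairs", split="train")
    row = ds[0]
    print(row["code"][:80])           # string field, 82 to 54.1k characters
    print(row["code_codestyle"])      # int64 in [0, 699]
    print(row["style_context"][:80])  # string field, 111 to 35.6k characters
    print(row["label"])               # int64 in {0, 1}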
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): a__ : Union[str, Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase__ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(lowerCamelCase__ , "num_attention_heads" ) ) class A__ : """simple docstring""" def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any]=13 , lowerCamelCase__ : Dict=64 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : int=1 , lowerCamelCase__ : Union[str, Any]=16 , lowerCamelCase__ : List[str]=[128, 256, 384] , lowerCamelCase__ : Optional[int]=[4, 6, 8] , lowerCamelCase__ : Optional[int]=[2, 3, 4] , lowerCamelCase__ : Tuple=[16, 16, 16] , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Optional[Any]=[2, 2, 2] , lowerCamelCase__ : int=[2, 2, 2] , lowerCamelCase__ : Optional[int]=0.02 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[Any]=2 , ): a__ : Union[str, Any] = parent a__ : List[Any] = batch_size a__ : Dict = image_size a__ : Any = num_channels a__ : Union[str, Any] = kernel_size a__ : str = stride a__ : List[str] = padding a__ : Tuple = hidden_sizes a__ : Any = num_attention_heads a__ : List[str] = depths a__ : Any = key_dim a__ : Tuple = drop_path_rate a__ : Union[str, Any] = patch_size a__ : Optional[int] = attention_ratio a__ : int = mlp_ratio a__ : Union[str, Any] = initializer_range a__ : List[Any] = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] a__ : Tuple = is_training a__ : List[str] = use_labels a__ : List[Any] = num_labels a__ : List[Any] = initializer_range def _UpperCamelCase( self : Dict ): a__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Any = None if self.use_labels: a__ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) a__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def _UpperCamelCase( self : Union[str, Any] ): return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range 
, down_ops=self.down_ops , ) def _UpperCamelCase( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] ): a__ : List[Any] = LevitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) a__ : Optional[Any] = (self.image_size, self.image_size) a__, a__ : Optional[Any] = image_size[0], image_size[1] for _ in range(4 ): a__ : List[str] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) a__ : Union[str, Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _UpperCamelCase( self : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] ): a__ : str = self.num_labels a__ : Tuple = LevitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : int = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase( self : str ): a__ : List[str] = self.prepare_config_and_inputs() a__, a__, a__ : str = config_and_inputs a__ : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': LevitModel, 'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Optional[int] ): a__ : Optional[Any] = LevitModelTester(self ) a__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCamelCase( self : Dict ): return @unittest.skip(reason="Levit does not use inputs_embeds" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def _UpperCamelCase( self : Union[str, Any] ): pass @unittest.skip(reason="Levit does not output attentions" ) def _UpperCamelCase( self : Optional[int] ): pass def _UpperCamelCase( self : str ): a__, a__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Dict = [*signature.parameters.keys()] a__ : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Dict ): def check_hidden_states_output(lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : 
Optional[int] ): a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : Dict = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) a__ : Optional[int] = outputs.hidden_states a__ : List[Any] = len(self.model_tester.depths ) + 1 self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ ) a__ : int = (self.model_tester.image_size, self.model_tester.image_size) a__, a__ : List[Any] = image_size[0], image_size[1] for _ in range(4 ): a__ : str = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) a__ : Union[str, Any] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) a__, a__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Union[str, Any] = True check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__ : str = True check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _UpperCamelCase( self : Optional[int] ): pass def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : List[str]=False ): a__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _UpperCamelCase( self : List[str] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): if not self.model_tester.is_training: return a__, a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() a__ : Tuple = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowerCamelCase__ ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : int = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Optional[int] = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : str ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : Tuple = False a__ : Any = True for model_class in self.all_model_classes: if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue a__ : Any = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() 
model.to(lowerCamelCase__ ) model.train() a__ : List[str] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Any ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() a__ : Union[str, Any] = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowerCamelCase__ ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ): a__ : Optional[Any] = problem_type["title"] a__ : Any = problem_type["num_labels"] a__ : Tuple = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Optional[Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) if problem_type["num_labels"] > 1: a__ : Tuple = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) a__ : List[Any] = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list: a__ : Union[str, Any] = model(**lowerCamelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def _UpperCamelCase( self : int ): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = LevitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> List[Any]: a__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[Any] ): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _UpperCamelCase( self : List[Any] ): a__ : List[str] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : str = prepare_img() a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) # verify the logits a__ : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : Dict = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
code_codestyle: 37
from statistics import mean, stdev


def UpperCamelCase_(data, ndigits=3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale into [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def UpperCamelCase_(data, ndigits=3) -> list:  # shadows the normalization helper above
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: zero mean, unit standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
style_context_codestyle: 37
label: 1
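A minimal usage sketch for the two helpers in the style_context above. Both are defined under the same obfuscated name UpperCamelCase_, so after the module runs only the second definition (z-score standardization) is bound; the arithmetic itself can be checked by hand:

    from statistics import mean, stdev

    data = [2.0, 4.0, 6.0, 8.0]
    # Min-max normalization: (x - min) / (max - min)
    print([round((x - min(data)) / (max(data) - min(data)), 3) for x in data])
    # -> [0.0, 0.333, 0.667, 1.0]
    # Z-score standardization: (x - mean) / stdev
    mu, sigma = mean(data), stdev(data)
    print([round((x - mu) / sigma, 3) for x in data])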
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple=13 , lowerCamelCase__ : List[Any]=7 , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : Optional[Any]=99 , lowerCamelCase__ : Tuple=32 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : int=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : Optional[int]=512 , lowerCamelCase__ : str=16 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : int=0.02 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1_000 , ): a__ : Union[str, Any] = parent a__ : Optional[Any] = batch_size a__ : Optional[int] = seq_length a__ : Any = is_training a__ : Tuple = use_input_mask a__ : str = use_token_type_ids a__ : int = use_labels a__ : int = vocab_size a__ : int = hidden_size a__ : List[str] = num_hidden_layers a__ : Tuple = num_attention_heads a__ : Any = intermediate_size a__ : Tuple = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : List[Any] = attention_probs_dropout_prob a__ : Dict = max_position_embeddings a__ : int = type_vocab_size a__ : str = type_sequence_label_size a__ : List[str] = initializer_range a__ : List[str] = num_labels a__ : Any = num_choices a__ : Union[str, Any] = scope a__ : Dict = range_bbox def _UpperCamelCase( self : Optional[Any] ): a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment a__ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a__ : int = bbox[i, j, 3] a__ : str = bbox[i, j, 1] a__ : int = t if bbox[i, j, 2] < bbox[i, j, 0]: a__ : Union[str, Any] = bbox[i, j, 2] a__ : int = bbox[i, j, 0] a__ : Dict = t a__ : List[str] = tf.convert_to_tensor(lowerCamelCase__ ) a__ : Union[str, Any] = None if self.use_input_mask: a__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a__ : List[Any] = None if self.use_token_type_ids: a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a__ : str = None a__ : str = None a__ : int = None if self.use_labels: a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a__ : str = ids_tensor([self.batch_size] , self.num_choices ) a__ : Tuple = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ): a__ : Any = TFLayoutLMModel(config=lowerCamelCase__ ) a__ : Any = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) a__ : List[Any] = model(lowerCamelCase__ , lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) a__ : Tuple = model(lowerCamelCase__ , lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _UpperCamelCase( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : str ): a__ : int = TFLayoutLMForMaskedLM(config=lowerCamelCase__ ) a__ : Union[str, Any] = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple ): a__ : Any = self.num_labels a__ : Optional[Any] = TFLayoutLMForSequenceClassification(config=lowerCamelCase__ ) a__ : int = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Dict ): a__ : Any = self.num_labels a__ : Any = TFLayoutLMForTokenClassification(config=lowerCamelCase__ ) a__ : int = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] ): a__ : Optional[int] = TFLayoutLMForQuestionAnswering(config=lowerCamelCase__ ) a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , 
token_type_ids=lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase( self : Dict ): a__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ) : List[Any] = config_and_inputs a__ : Optional[int] = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _lowercase = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _lowercase = False _lowercase = True _lowercase = 1_0 def _UpperCamelCase( self : List[str] ): a__ : Optional[int] = TFLayoutLMModelTester(self ) a__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() def _UpperCamelCase( self : int ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : Any ): a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : Tuple ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : str = TFLayoutLMModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def _UpperCamelCase( self : Optional[int] ): pass def UpperCamelCase_ ( ) -> List[Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off a__ : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 a__ : Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 a__ : Tuple = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 a__ : Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) a__ : List[str] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class A__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase( self : int ): a__ : Union[str, Any] = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) a__, a__, a__, a__, a__ : Tuple = prepare_layoutlm_batch_inputs() # forward pass a__ : Optional[int] = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) # test the sequence output on [0, :3, :3] a__ : Dict = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-3 ) ) # test the pooled output on [1, :3] a__ : Tuple = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowerCamelCase__ , atol=1E-3 ) ) @slow def _UpperCamelCase( self : Optional[int] ): # initialize model with randomly initialized sequence classification head a__ : Tuple = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) a__, a__, a__, a__, a__ : str = prepare_layoutlm_batch_inputs() # forward pass a__ : Optional[Any] = model( input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar a__ : Union[str, Any] = outputs.loss a__ : Tuple = (2,) self.assertEqual(loss.shape , lowerCamelCase__ ) # test the shape of the logits a__ : Dict = outputs.logits a__ : Any = (2, 2) self.assertEqual(logits.shape , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): # initialize model with randomly initialized token classification head a__ : str = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) a__, a__, a__, a__, a__ : Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass a__ : Optional[Any] = model( input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , 
token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) # test the shape of the logits a__ : Any = outputs.logits a__ : List[Any] = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): # initialize model with randomly initialized token classification head a__ : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) a__, a__, a__, a__, a__ : Union[str, Any] = prepare_layoutlm_batch_inputs() # forward pass a__ : List[str] = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) # test the shape of the logits a__ : Dict = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , lowerCamelCase__ ) self.assertEqual(outputs.end_logits.shape , lowerCamelCase__ )
code_codestyle: 37
def solution(length=50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):  # block lengths 2, 3 and 4
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"""{solution() = }""")
style_context_codestyle: 37
label: 1
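The style_context above is the Project Euler 116 tiling count (blocks of length 2, 3 and 4, one colour at a time). A small independent sketch of the same count via the recurrence f(n) = f(n - 1) + f(n - k) for a single block length k, minus the tiling that uses no block:

    def ways_with_block(n: int, k: int) -> int:
        # f[i] counts tilings of an i-cell row using unit cells and k-cell blocks.
        f = [0] * (n + 1)
        for i in range(n + 1):
            f[i] = 1 if i < k else f[i - 1] + f[i - k]
        return f[n] - 1  # drop the tiling with no coloured block

    assert ways_with_block(5, 2) == 7  # the worked example from Project Euler 116
    print(sum(ways_with_block(5, k) for k in (2, 3, 4)))  # 12, matching solution(5)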
from collections import defaultdict
from math import gcd


def solution(limit=1_500_000) -> int:
    # Count perimeters up to the limit that admit exactly one right triangle,
    # generating primitive triples with Euclid's formula.
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"""{solution() = }""")
code_codestyle: 37
class Things:
    """simple docstring"""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(name)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    # Greedy knapsack heuristic: take items in descending key order while they fit.
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 37
label: 1
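The solution above leans on Euclid's formula: coprime m > n of opposite parity give legs m^2 - n^2 and 2mn, hypotenuse m^2 + n^2, hence perimeter 2m(m + n). A short sketch confirming this for the first few primitive triples:

    from math import gcd

    for m in range(2, 5):
        for n in range(1, m):
            if (m - n) % 2 == 1 and gcd(m, n) == 1:
                a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
                print((a, b, c), a + b + c == 2 * m * (m + n))  # (3, 4, 5) True, ...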
def solution(limit=1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if first_term > common_difference and first_term < 4 * common_difference:
                    # since x, y, z are positive integers: z > 0 and a > d, also 4d < a
                    frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"""{solution() = }""")
code_codestyle: 37
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    """simple docstring"""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
style_context_codestyle: 37
label: 1
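The writer in the style_context above is what backs Dataset.to_sql in recent versions of the datasets library. A minimal usage sketch, assuming SQLAlchemy is installed (needed for URI connection strings) and using an illustrative file name:

    from datasets import Dataset

    ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
    # Write the rows to a table named "samples" in a local SQLite database.
    ds.to_sql("samples", "sqlite:///example.db")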
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    """simple docstring"""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def UpperCamelCase_(__a) -> bool:
    # Validation
    def is_valid_tree(node) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(__a):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(node, left_bound, right_bound) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(__a, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 37
import math
from datetime import datetime, timedelta


def gauss_easter(year) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = """will be""" if year > datetime.now().year else """was"""
        print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
style_context_codestyle: 37
label: 1
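A quick check of the Gauss computus above, assuming gauss_easter from that snippet is in scope; the result is easy to verify against a calendar:

    print(gauss_easter(2023))  # 2023-04-09 00:00:00; Easter Sunday 2023 was April 9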
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def UpperCamelCase_(__a) -> str:
    __a = re.sub("<n>", "", __a)  # remove pegasus newline char (re.sub returns a new string)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a))
code_codestyle: 37
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
style_context_codestyle: 37
label: 1
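The tests in the style_context above target the public 4-bit loading API. A minimal sketch of that API, assuming a CUDA GPU with bitsandbytes and accelerate installed:

    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
    model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
    )
    inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
    print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))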
NUMBERS_PLUS_LETTER = """Input must be a string of 8 numbers plus letter"""
LOOKUP_LETTERS = """TRWAGMYFPDXBNJZSQVHLCKE"""


def UpperCamelCase_(spanish_id) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"""Expected string as input, found {type(spanish_id).__name__}"""
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 37
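The code field above implements the Spanish DNI check letter: the letter is LOOKUP_LETTERS[number % 23]. A worked check with the classic example number 12345678:

    LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
    number = 12345678
    print(LOOKUP_LETTERS[number % 23])  # 'Z', so "12345678Z" is a valid DNI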
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
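# A minimal inference sketch distilled from the integration tests above; the
# checkpoint name, fixture image path, and expected class id (281) are taken
# from those tests, everything else is standard transformers usage rather than
# part of the test suite itself.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# The integration test expects argmax class 281 for this image.
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])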
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : List[str] = logging.get_logger(__name__) def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = "huggingface/label-files" a__ : Tuple = "imagenet-1k-id2label.json" a__ : Dict = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) a__ : List[str] = {int(__a ): v for k, v in idalabel.items()} a__ : List[Any] = {v: k for k, v in idalabel.items()} a__ : Dict = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" a__ : Tuple = BitConfig( conv_layer=__a , num_labels=1_000 , idalabel=__a , labelaid=__a , ) return config def UpperCamelCase_ ( __a ) -> Any: if "stem.conv" in name: a__ : Dict = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: a__ : str = name.replace("blocks" , "layers" ) if "head.fc" in name: a__ : Union[str, Any] = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): a__ : List[Any] = "bit." + name if "bit" not in name and "classifier" not in name: a__ : List[Any] = "bit.encoder." + name return name def UpperCamelCase_ ( ) -> Optional[Any]: a__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def UpperCamelCase_ ( __a , __a , __a=False ) -> Any: a__ : Optional[int] = get_config(__a ) # load original model from timm a__ : List[str] = create_model(__a , pretrained=__a ) timm_model.eval() # load state_dict of original model a__ : Optional[int] = timm_model.state_dict() for key in state_dict.copy().keys(): a__ : Tuple = state_dict.pop(__a ) a__ : str = val.squeeze() if "head" in key else val # load HuggingFace model a__ : str = BitForImageClassification(__a ) model.eval() model.load_state_dict(__a ) # create image processor a__ : List[Any] = create_transform(**resolve_data_config({} , model=__a ) ) a__ : Any = transform.transforms a__ : Optional[int] = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } a__ : str = BitImageProcessor( do_resize=__a , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__a , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__a , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) a__ : Tuple = prepare_img() a__ : Dict = transform(__a ).unsqueeze(0 ) a__ : List[Any] = processor(__a , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__a , __a ) # verify logits with torch.no_grad(): a__ : int = model(__a ) a__ : str = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) a__ : Tuple = timm_model(__a ) assert timm_logits.shape == outputs.logits.shape 
assert torch.allclose(__a , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__a ).mkdir(exist_ok=__a ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) processor.save_pretrained(__a ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) UpperCamelCase : List[Any] = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
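# A hypothetical direct call to the conversion routine above. The __main__
# block refers to it as convert_bit_checkpoint, so that name is assumed here;
# the dump folder is a placeholder, and the arguments are passed positionally
# to match the (model_name, pytorch_dump_folder_path, push_to_hub) signature.
convert_bit_checkpoint("resnetv2_50x1_bitm", "/tmp/bit-resnetv2-50x1", False)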
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    # Collapse every "name.<digit>" segment into "name_<digit>" (Flax naming).
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
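# A small illustration of the renaming rule above: each "<name>.<digit>"
# segment is collapsed to "<name>_<digit>", matching Flax's flattened module
# naming, while segments without a trailing index are left untouched.
assert rename_key("down_blocks.0.attentions.1.proj") == "down_blocks_0.attentions_1.proj"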
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase : Optional[int] = get_tests_dir("""fixtures""") UpperCamelCase : List[Any] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") UpperCamelCase : List[str] = get_tests_dir("""fixtures/dummy-config.json""") class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : str ): a__ : int = 0 def _UpperCamelCase( self : Dict ): a__ : Any = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdirname: a__ : Optional[int] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict() config_dict.pop("feature_extractor_type" ) a__ : Tuple = WavaVecaFeatureExtractor(**lowerCamelCase__ ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) config.save_pretrained(lowerCamelCase__ ) a__ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) # make sure private variable is not incorrectly saved a__ : Dict = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Any ): with self.assertRaisesRegex( lowerCamelCase__ , "bert-base is not a local folder and is not a valid model identifier" ): a__ : Tuple = AutoFeatureExtractor.from_pretrained("bert-base" ) def _UpperCamelCase( self : List[Any] ): with self.assertRaisesRegex( lowerCamelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="aaaaaa" ) def _UpperCamelCase( self : Union[str, Any] ): with self.assertRaisesRegex( lowerCamelCase__ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): a__ : List[Any] = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def _UpperCamelCase( self : Union[str, Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): a__ : str = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase__ ): a__ : Tuple = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ ) a__ : List[str] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) a__ : Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def _UpperCamelCase( self : Optional[Any] ): try: AutoConfig.register("custom" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API a__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) a__ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def _UpperCamelCase( self : int ): class A__ ( A__ ): """simple docstring""" _lowercase = True try: AutoConfig.register("custom" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # If remote code is not set, the default is to use local a__ : Dict = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. a__ : List[str] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub a__ : int = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(lowerCamelCase__ , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
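# A minimal sketch of the happy path these tests exercise: resolving a feature
# extractor from a Hub checkpoint (the checkpoint name comes from the tests)
# and round-tripping it through save_pretrained/from_pretrained.
import tempfile

from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
with tempfile.TemporaryDirectory() as tmp_dir:
    extractor.save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
assert type(reloaded) is type(extractor)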
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
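# A programmatic sketch of the same flow without the CLI; the field names
# (models, batch_sizes, sequence_lengths) follow TensorFlowBenchmarkArguments,
# and the checkpoint and sizes below are placeholders.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
print(TensorFlowBenchmark(args=args).run())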
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : """simple docstring""" def __init__( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : int=7 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : int=2 , lowerCamelCase__ : Union[str, Any]=99 , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : Union[str, Any]=32 , lowerCamelCase__ : Any=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=0.1 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Union[str, Any]=512 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : int=0.02 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : List[Any]="last" , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=0 , ): a__ : int = parent a__ : Optional[Any] = batch_size a__ : Optional[int] = seq_length a__ : Union[str, Any] = is_training a__ : List[str] = use_input_lengths a__ : Optional[Any] = use_token_type_ids a__ : Optional[int] = use_labels a__ : List[Any] = gelu_activation a__ : List[Any] = sinusoidal_embeddings a__ : Optional[int] = causal a__ : Dict = asm a__ : str = n_langs a__ : Optional[Any] = vocab_size a__ : Union[str, Any] = n_special a__ : List[Any] = hidden_size a__ : Dict = num_hidden_layers a__ : str = num_attention_heads a__ : Union[str, Any] = hidden_dropout_prob a__ : str = attention_probs_dropout_prob a__ : int = max_position_embeddings a__ : List[Any] = type_sequence_label_size a__ : List[str] = initializer_range a__ : Union[str, Any] = num_labels a__ : str = num_choices a__ : Union[str, Any] = summary_type a__ : Tuple = use_proj a__ : int = scope a__ : Any = bos_token_id def _UpperCamelCase( self : Any ): a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) a__ : List[Any] = None if self.use_input_lengths: a__ : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length a__ : List[Any] = None if self.use_token_type_ids: a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) a__ : Optional[Any] = None a__ : Tuple = None a__ : str = None if self.use_labels: a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a__ : str = ids_tensor([self.batch_size] , 2 ).float() a__ : str = ids_tensor([self.batch_size] , self.num_choices ) a__ : Tuple = self.get_config() return ( config, input_ids, token_type_ids, 
input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _UpperCamelCase( self : Dict ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , ): a__ : Any = XLMModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ , lengths=lowerCamelCase__ , langs=lowerCamelCase__ ) a__ : Optional[int] = model(lowerCamelCase__ , langs=lowerCamelCase__ ) a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , ): a__ : Dict = XLMWithLMHeadModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[Any] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , ): a__ : List[Any] = XLMForQuestionAnsweringSimple(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) a__ : str = model(lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ ) a__ : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , ): a__ : List[Any] = XLMForQuestionAnswering(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model(lowerCamelCase__ ) a__ : Any = model( lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , cls_index=lowerCamelCase__ , is_impossible=lowerCamelCase__ , p_mask=lowerCamelCase__ , ) a__ 
: Union[str, Any] = model( lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , cls_index=lowerCamelCase__ , is_impossible=lowerCamelCase__ , ) ((a__), ) : List[Any] = result_with_labels.to_tuple() a__ : List[Any] = model(lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ ) ((a__), ) : List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = XLMForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model(lowerCamelCase__ ) a__ : List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , ): a__ : int = self.num_labels a__ : Dict = XLMForTokenClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase( self : int , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , ): a__ : str = self.num_choices a__ : Union[str, Any] = XLMForMultipleChoice(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : Dict = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCamelCase( self : Union[str, Any] ): a__ : str = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ) : Tuple = config_and_inputs a__ : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, 
"lengths": input_lengths} return config, inputs_dict @require_torch class A__ ( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowercase = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowercase = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _UpperCamelCase( self : int , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any]=False ): a__ : List[Any] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": a__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) a__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) return inputs_dict def _UpperCamelCase( self : Dict ): a__ : List[str] = XLMModelTester(self ) a__ : str = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 ) def _UpperCamelCase( self : Any ): self.config_tester.run_common_tests() def _UpperCamelCase( self : Tuple ): a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase__ ) def _UpperCamelCase( self : Any ): a__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase__ ) def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , 
lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : Dict=1 ): self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) self.assertListEqual( [isinstance(lowerCamelCase__ , lowerCamelCase__ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase__ ) ) self.assertEqual(len(lowerCamelCase__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCamelCase__ ): # adds PAD dummy token a__ : Tuple = min_length + idx + 1 a__ : str = min_length + idx + 1 a__ : List[Any] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Optional[int]=1 ): self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) self.assertListEqual( [isinstance(lowerCamelCase__ , lowerCamelCase__ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase__ ) , ) self.assertEqual(len(lowerCamelCase__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCamelCase__ ): # adds PAD dummy token a__ : Tuple = min_length + idx + 1 a__ : List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase__ ) , ) pass @slow def _UpperCamelCase( self : Optional[Any] ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = XLMModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase( self : List[str] ): a__ : Optional[Any] = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(lowerCamelCase__ ) a__ : Any = torch.tensor([[14, 447]] , dtype=torch.long , device=lowerCamelCase__ ) # the president a__ : Any = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference a__ : Optional[Any] = model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase__ )
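# A minimal generation sketch mirroring the integration test above; the
# checkpoint and the [14, 447] ("the president") prompt are taken from that
# test, which also notes the model is not really made for auto-regressive
# inference, so the output quality caveat there applies.
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())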
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
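# A tiny illustration of the max-over-references scorer defined near the top of
# this script; its name, metric_max_over_ground_truths, is taken from its call
# sites above, and exact_match_score (imported from utils_rag) normalizes case
# before comparing, so "Paris" should match "paris".
print(metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "London"]))  # -> truthy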
def euclidean_gcd(a: int, b: int) -> int:
    # Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a mod b).
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
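# Worked check: gcd(48, 18) contracts as (48, 18) -> (18, 12) -> (12, 6) -> (6, 0),
# so both variants should return 6.
assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6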
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
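# A minimal, self-contained sketch of the accuracy metric that compute_metrics
# above computes, runnable without TensorFlow or a trained model; the logits
# and labels below are made-up placeholder values, not real model outputs.
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # (batch, num_labels)
label_ids = np.array([1, 0, 0])

preds = np.argmax(logits, axis=1)       # predicted class index per example
accuracy = (preds == label_ids).mean()  # fraction of correct predictions
print({"acc": accuracy})                # {'acc': 0.666...}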
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any]=13 , lowerCamelCase__ : str=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[Any]=5 , lowerCamelCase__ : str=4 , lowerCamelCase__ : str=37 , lowerCamelCase__ : Dict="gelu" , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Tuple=512 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Union[str, Any]=0.02 , lowerCamelCase__ : List[Any]=4 , ): a__ : Optional[Any] = parent a__ : List[str] = batch_size a__ : str = seq_length a__ : Optional[int] = is_training a__ : Optional[Any] = use_attention_mask a__ : str = use_token_type_ids a__ : Optional[Any] = use_labels a__ : str = vocab_size a__ : Optional[Any] = hidden_size a__ : str = num_hidden_layers a__ : int = num_attention_heads a__ : Tuple = intermediate_size a__ : List[Any] = hidden_act a__ : Optional[int] = hidden_dropout_prob a__ : Any = attention_probs_dropout_prob a__ : str = max_position_embeddings a__ : Optional[int] = type_vocab_size a__ : Tuple = type_sequence_label_size a__ : Optional[int] = initializer_range a__ : List[Any] = num_choices def _UpperCamelCase( self : Optional[int] ): a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : int = None if self.use_attention_mask: a__ : str = random_attention_mask([self.batch_size, self.seq_length] ) a__ : Dict = None if self.use_token_type_ids: a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a__ : int = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCamelCase( self : Tuple ): a__ : str = self.prepare_config_and_inputs() a__, a__, a__, a__ : Optional[Any] = config_and_inputs a__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def _UpperCamelCase( self : str ): a__ : Optional[Any] = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Optional[int] 
= True a__ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = True _lowercase = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCamelCase( self : List[str] ): a__ : Dict = FlaxRobertaPreLayerNormModelTester(self ) @slow def _UpperCamelCase( self : Dict ): for model_class_name in self.all_model_classes: a__ : List[str] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ ) a__ : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ ) @require_flax class A__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase( self : Dict ): a__ : Any = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ ) a__ : str = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) a__ : int = model(lowerCamelCase__ )[0] a__ : int = [1, 11, 50_265] self.assertEqual(list(output.shape ) , lowerCamelCase__ ) # compare the actual values for a slice. a__ : str = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Optional[Any] ): a__ : int = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ ) a__ : Any = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) a__ : str = model(lowerCamelCase__ )[0] # compare the actual values for a slice. a__ : Optional[Any] = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
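# The slow integration tests above follow a common pattern: run the model,
# take a small slice of the output, and compare it against hard-coded
# reference values within a tolerance. A standalone sketch of that comparison,
# using invented placeholder arrays rather than real model outputs:
import numpy as np

output = np.array([[[0.0208, -0.0356, 0.0237],
                    [-0.1569, -0.0411, -0.2626],
                    [0.1879, 0.0125, -0.0089]]])
expected_slice = output + 1e-5  # stand-in for the recorded reference values

assert np.allclose(output[:, :3, :3], expected_slice, atol=1e-4)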
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
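# The token-overlap F1 computed above, extracted into a tiny self-contained
# function (without the answer-normalization step, for brevity):
import collections


def token_f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.split(), pred.split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)


print(token_f1("the cat sat", "a cat sat"))  # 0.666...: 2 shared tokens of 3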
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    # NLTK's sentence tokenizer splits the text into sentences, which are
    # then re-joined with newlines (Pegasus expects one sentence per line).
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
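# Usage sketch for the splitter above; assumes nltk is installed and the
# "punkt" data was fetched by the import-time download.
if NLTK_AVAILABLE:
    print(add_newline_to_end_of_each_sentence("First sentence. Second one."))
    # First sentence.
    # Second one.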
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
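# The offset mappings checked in the test above map each token back to its
# (start, end) character span in the raw input. A hand-rolled illustration
# with whitespace tokenization (not CLIP's real BPE):
text = "hello hello"
offsets, pos = [], 0
for word in text.split():
    start = text.index(word, pos)  # locate this occurrence of the word
    offsets.append((start, start + len(word)))
    pos = start + len(word)
print(offsets)  # [(0, 5), (6, 11)]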
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class A__ : """simple docstring""" def __init__( self : str , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple ): logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) a__ : Optional[int] = model a__ : str = kwargs.get("model_save_dir" , lowerCamelCase__ ) a__ : List[str] = kwargs.get("latest_model_name" , lowerCamelCase__ ) def __call__( self : Tuple , **lowerCamelCase__ : Tuple ): a__ : Union[str, Any] = {k: np.array(lowerCamelCase__ ) for k, v in kwargs.items()} return self.model.run(lowerCamelCase__ , lowerCamelCase__ ) @staticmethod def _UpperCamelCase( lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Optional[Any]=None ): if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) a__ : List[Any] = "CPUExecutionProvider" return ort.InferenceSession(lowerCamelCase__ , providers=[provider] , sess_options=lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : Tuple ): a__ : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME a__ : Union[str, Any] = self.model_save_dir.joinpath(self.latest_model_name ) a__ : Optional[int] = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ ) try: shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) a__ : Optional[int] = self.model_save_dir.joinpath(lowerCamelCase__ ) if src_path.exists(): a__ : Union[str, Any] = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ ) try: shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ ) except shutil.SameFileError: pass def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : Optional[Any] , ): if os.path.isfile(lowerCamelCase__ ): logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) # saving model weights/files self._save_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) @classmethod def _UpperCamelCase( cls : List[Any] , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[Union[bool, str, None]] = None , lowerCamelCase__ : Optional[Union[str, None]] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional["ort.SessionOptions"] = None , **lowerCamelCase__ : Union[str, Any] , ): a__ : int = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowerCamelCase__ ): a__ : 
Optional[Any] = OnnxRuntimeModel.load_model( os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ ) a__ : List[Any] = Path(lowerCamelCase__ ) # load model from hub else: # download model a__ : Any = hf_hub_download( repo_id=lowerCamelCase__ , filename=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , ) a__ : Optional[int] = Path(lowerCamelCase__ ).parent a__ : Union[str, Any] = Path(lowerCamelCase__ ).name a__ : Union[str, Any] = OnnxRuntimeModel.load_model(lowerCamelCase__ , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ ) return cls(model=lowerCamelCase__ , **lowerCamelCase__ ) @classmethod def _UpperCamelCase( cls : Dict , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : List[str] , ): a__ : List[str] = None if len(str(lowerCamelCase__ ).split("@" ) ) == 2: a__, a__ : Optional[Any] = model_id.split("@" ) return cls._from_pretrained( model_id=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , **lowerCamelCase__ , )
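# What OnnxRuntimeModel.__call__ does with its keyword arguments before they
# reach onnxruntime: coerce everything to numpy arrays. The actual session.run
# call is sketched as a comment, since it needs a real .onnx file on disk;
# "model.onnx" below is a placeholder path, not a file shipped with this module.
import numpy as np

kwargs = {"input_ids": [[0, 1, 2]], "attention_mask": [[1, 1, 1]]}
inputs = {k: np.array(v) for k, v in kwargs.items()}
print({k: v.shape for k, v in inputs.items()})  # {'input_ids': (1, 3), 'attention_mask': (1, 3)}
# session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
# outputs = session.run(None, inputs)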
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
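# A small demonstration of the (pattern, replacement) table applied by the
# converter above, reduced to two of its rules and one invented line of
# TFDS-style code:
import re

rules = [
    (r"tfds\.core", r"datasets"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
]
line = "tfds.core.DatasetInfo(features={'text': tfds.features.Text()})"
for pattern, replacement in rules:
    line = re.sub(pattern, replacement, line)
print(line)  # datasets.DatasetInfo(features={'text': datasets.Value('string')})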
from __future__ import annotations import queue class A__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : Any = data a__ : str = None a__ : str = None def UpperCamelCase_ ( ) -> TreeNode: print("\n********Press N to stop entering at any point of time********\n" ) a__ : Union[str, Any] = input("Enter the value of the root node: " ).strip().lower() a__ : queue.Queue = queue.Queue() a__ : List[Any] = TreeNode(int(__a ) ) q.put(__a ) while not q.empty(): a__ : Dict = q.get() a__ : str = f'''Enter the left node of {node_found.data}: ''' a__ : Any = input(__a ).strip().lower() or "n" if check == "n": return tree_node a__ : Any = TreeNode(int(__a ) ) a__ : str = left_node q.put(__a ) a__ : Optional[Any] = f'''Enter the right node of {node_found.data}: ''' a__ : Any = input(__a ).strip().lower() or "n" if check == "n": return tree_node a__ : List[Any] = TreeNode(int(__a ) ) a__ : Tuple = right_node q.put(__a ) raise def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return a__ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): a__ : Union[str, Any] = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return a__ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): a__ : List[Any] = [] while not q.empty(): a__ : Tuple = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__a ) def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return a__ : list[TreeNode] = [] a__ : Union[str, Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(__a ) a__ : List[Any] = n.left # end of while means current node doesn't have left child a__ : Optional[int] = stack.pop() # start to traverse its right child a__ : List[Any] = n.right def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return a__ : list[TreeNode] = [] a__ : List[str] = node while n or stack: while n: stack.append(__a ) a__ : List[str] = n.left a__ : Dict = stack.pop() print(n.data , end="," ) a__ : str = n.right def UpperCamelCase_ ( __a ) -> None: if not isinstance(__a , __a ) or not node: return a__, a__ : Union[str, Any] = [], [] a__ : Tuple = node stacka.append(__a ) while stacka: # to find the reversed order of post order, store it in stack2 a__ : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__a ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def UpperCamelCase_ ( __a = "" , __a=50 , __a="*" ) -> str: if not s: return "\n" + width * char a__, a__ : Any = divmod(width - len(__a ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * 
char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCamelCase : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
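# One classic way to do pre-order iteratively (a different formulation from
# pre_order_iter above, producing the same visiting order), shown
# self-contained with a hand-built three-node tree instead of build_tree()'s
# input() prompts:
class Node:
    def __init__(self, data):
        self.data, self.left, self.right = data, None, None


root = Node(1)
root.left, root.right = Node(2), Node(3)

stack, out = [root], []
while stack:
    n = stack.pop()
    out.append(n.data)
    if n.right:
        stack.append(n.right)  # push right first so the left child pops first
    if n.left:
        stack.append(n.left)
print(out)  # [1, 2, 3]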
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
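# Hedged usage sketch: the classes above register compressed-file protocols
# for the datasets library; plain fsspec can read the same gzip file via its
# documented compression= argument, shown here with a throwaway temp file.
import gzip
import os
import tempfile

import fsspec

path = os.path.join(tempfile.mkdtemp(), "file.txt.gz")
with gzip.open(path, "wt") as f:
    f.write("hello")
with fsspec.open(path, "rt", compression="gzip") as f:
    print(f.read())  # hello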
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : str = { """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt_neox' def __init__( self : int , lowerCamelCase__ : int=50_432 , lowerCamelCase__ : Optional[int]=6_144 , lowerCamelCase__ : Optional[Any]=44 , lowerCamelCase__ : Tuple=64 , lowerCamelCase__ : Union[str, Any]=24_576 , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : Union[str, Any]=0.25 , lowerCamelCase__ : Optional[int]=10_000 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=2_048 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Any=1E-5 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : str=True , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Tuple , ): super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = vocab_size a__ : str = max_position_embeddings a__ : Optional[int] = hidden_size a__ : List[str] = num_hidden_layers a__ : Tuple = num_attention_heads a__ : int = intermediate_size a__ : Optional[int] = hidden_act a__ : str = rotary_pct a__ : Optional[Any] = rotary_emb_base a__ : Any = attention_dropout a__ : Dict = hidden_dropout a__ : Optional[int] = classifier_dropout a__ : Dict = initializer_range a__ : int = layer_norm_eps a__ : Tuple = use_cache a__ : Optional[Any] = tie_word_embeddings a__ : List[str] = use_parallel_residual a__ : str = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisble by the number of attention heads! Make sure to update them!" ) def _UpperCamelCase( self : Optional[Any] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowerCamelCase__ ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'''got {self.rope_scaling}''' ) a__ : Dict = self.rope_scaling.get("type" , lowerCamelCase__ ) a__ : List[str] = self.rope_scaling.get("factor" , lowerCamelCase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
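# Usage sketch for the config above (assumes a transformers version that
# exposes GPTNeoXConfig with rope_scaling support); instantiating the config
# triggers the validation defined in _rope_scaling_validation:
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

try:
    GPTNeoXConfig(rope_scaling={"type": "unknown", "factor": 2.0})
except ValueError as err:
    print(err)  # the type field must be one of ['linear', 'dynamic']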
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
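# A standalone illustration of the variable-name mapping the converter above
# applies to each PyTorch state-dict key (just the renaming, no TensorFlow):
patterns = (
    ("layer.", "layer_"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("weight", "kernel"),
)
name = "encoder.layer.0.attention.self.query.weight"
for patt, repl in patterns:
    name = name.replace(patt, repl)
print(f"bert/{name}")  # bert/encoder/layer_0/attention/self/query/kernel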
# Install cell and notebook config for the Italian docs build. The strings
# below are intentionally in Italian ("Installing Transformers" / "To install
# from source instead of the last release, comment the command above and
# uncomment the following one.").
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
37
1
def binomial_coefficient(n: int, r: int) -> int:
    # Build Pascal's triangle one row at a time, keeping only the current row.
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, update right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
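# Quick sanity check added here as a sketch (assumes Python 3.8+ for math.comb):
import math

for n, r in [(5, 2), (10, 5), (20, 7)]:
    assert binomial_coefficient(n, r) == math.comb(n, r)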
37
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
37
1
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value into the [0, 1] range
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: shift to zero mean and unit (sample) variance
    return [round((x - mu) / sigma, ndigits) for x in data]
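# Illustrative usage (added example, not part of the original module): min-max
# scaling maps the endpoints of [2, 4, 6] to 0 and 1, while standardization
# (sample stdev) gives zero mean and unit variance.
if __name__ == "__main__":
    print(normalization([2, 4, 6]))    # [0.0, 0.5, 1.0]
    print(standardization([2, 4, 6]))  # [-1.0, 0.0, 1.0]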
37
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : str = logging.get_logger(__name__) def UpperCamelCase_ ( __a , __a=False ) -> Dict: a__ : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" a__ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def UpperCamelCase_ ( __a , __a , __a=False ) -> Optional[int]: for i in range(config.num_hidden_layers ): if base_model: a__ : Optional[Any] = "" else: a__ : List[Any] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a__ : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) a__ : Union[str, Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a__ : List[str] = in_proj_weight[ : config.hidden_size, : ] a__ : List[Any] = in_proj_bias[: config.hidden_size] a__ : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a__ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a__ : Tuple = in_proj_weight[ -config.hidden_size :, : ] a__ : Union[str, Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase_ ( __a , __a , __a ) -> int: a__ : List[str] = dct.pop(__a ) a__ : List[Any] = val def UpperCamelCase_ ( ) -> Optional[Any]: a__ : str = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : Dict = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def UpperCamelCase_ ( __a , __a ) -> List[str]: a__ : str = DeiTConfig() # all deit models have fine-tuned heads a__ : Tuple = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size a__ : Optional[Any] = 1_000 a__ : List[str] = "huggingface/label-files" a__ : int = "imagenet-1k-id2label.json" a__ : List[str] = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) a__ : Union[str, Any] = {int(__a ): v for k, v in idalabel.items()} a__ : Optional[Any] = idalabel a__ : List[str] = {v: k for k, v in idalabel.items()} a__ : Optional[Any] = int(deit_name[-6:-4] ) a__ : Tuple = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): a__ : Dict = 192 a__ : Union[str, Any] = 768 a__ : List[str] = 12 a__ : Union[str, Any] = 3 elif deit_name[9:].startswith("small" ): a__ : int = 384 a__ : int = 1_536 a__ : Optional[int] = 12 a__ : int = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): a__ : List[str] = 1_024 a__ : List[str] = 4_096 a__ : int = 24 a__ : Any = 16 # load original model from timm a__ : List[Any] = timm.create_model(__a , pretrained=__a ) timm_model.eval() # load state_dict of original model, remove and rename some keys a__ : Dict = timm_model.state_dict() a__ : Optional[Any] = create_rename_keys(__a , __a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) read_in_q_k_v(__a , __a , __a ) # load HuggingFace model a__ : Optional[int] = DeiTForImageClassificationWithTeacher(__a ).eval() model.load_state_dict(__a ) # Check outputs on an image, prepared by DeiTImageProcessor a__ : int = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 a__ : Tuple = DeiTImageProcessor(size=__a , crop_size=config.image_size ) a__ : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" ) a__ : Optional[int] = encoding["pixel_values"] a__ : Any = model(__a ) a__ : List[str] = timm_model(__a ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__a , outputs.logits , atol=1e-3 ) Path(__a ).mkdir(exist_ok=__a ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__a ) if __name__ == "__main__": UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) UpperCamelCase : Optional[int] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
37
def binomial_coefficient(n: int, r: int) -> int:
    # Build Pascal's triangle one row at a time, keeping only the current row.
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, update right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
37
1
def perfect(number: int) -> bool:
    # a perfect number equals the sum of its proper divisors
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
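# Added sanity checks: 6 and 28 are the first two perfect numbers; 12 is
# abundant (its proper divisors sum to 16), so it is not perfect.
assert perfect(6) and perfect(28) and not perfect(12)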
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
1
class EditDistance:
    """Minimum edit (Levenshtein) distance between two strings, computed both
    top-down (memoised recursion) and bottom-up (tabulation)."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
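# Added illustrative check (not in the original harness): transforming
# "intention" into "execution" takes 5 edits, a classic textbook example.
_solver = EditDistance()
assert _solver.min_dist_top_down("intention", "execution") == 5
assert _solver.min_dist_bottom_up("intention", "execution") == 5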
37
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
37
1
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
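# Added quick checks covering both parities of the combined length:
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5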
37
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value into the [0, 1] range
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: shift to zero mean and unit (sample) variance
    return [round((x - mu) / sigma, ndigits) for x in data]
37
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any]=13 , lowerCamelCase__ : Union[str, Any]=30 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Optional[int]="gelu" , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[str]=10 , lowerCamelCase__ : Union[str, Any]=0.02 , lowerCamelCase__ : int=3 , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : Tuple=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Optional[int] = image_size a__ : Tuple = patch_size a__ : Union[str, Any] = num_channels a__ : str = is_training a__ : Dict = use_labels a__ : List[str] = hidden_size a__ : Union[str, Any] = num_hidden_layers a__ : Tuple = num_attention_heads a__ : Dict = intermediate_size a__ : List[Any] = hidden_act a__ : int = hidden_dropout_prob a__ : Optional[Any] = attention_probs_dropout_prob a__ : Dict = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : Tuple = scope a__ : List[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) a__ : Optional[Any] = (image_size // patch_size) ** 2 a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : Optional[Any] ): a__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : List[str] = None if self.use_labels: a__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : str = self.get_config() return config, pixel_values, labels def _UpperCamelCase( self : Tuple ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : str ): a__ : List[str] = TFDeiTModel(config=lowerCamelCase__ ) a__ : Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ): a__ : str = TFDeiTForMaskedImageModeling(config=lowerCamelCase__ ) a__ : Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images a__ : Dict = 1 a__ : Union[str, Any] = TFDeiTForMaskedImageModeling(lowerCamelCase__ ) a__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : int = model(lowerCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : str ): a__ : Optional[int] = self.type_sequence_label_size a__ : Any = TFDeiTForImageClassification(lowerCamelCase__ ) a__ : List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : List[str] = 1 a__ : str = TFDeiTForImageClassification(lowerCamelCase__ ) a__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : List[str] ): a__ : Optional[int] = self.prepare_config_and_inputs() a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _lowercase = ( { 'feature-extraction': TFDeiTModel, 'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Dict ): a__ : Dict = TFDeiTModelTester(self ) a__ : Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def _UpperCamelCase( self : List[Any] ): pass def _UpperCamelCase( self : str ): a__, a__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Dense ) ) def _UpperCamelCase( self : List[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : str = model_class(lowerCamelCase__ ) a__ : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : List[Any] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : 
Union[str, Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : List[str]=False ): a__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _UpperCamelCase( self : int ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Optional[Any] = TFDeiTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Tuple: a__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Tuple ): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : int = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) a__ : Any = self.default_image_processor a__ : Any = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="tf" ) # forward pass a__ : Dict = model(**lowerCamelCase__ ) # verify the logits a__ : Any = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
37
def solution(length: int = 50) -> int:
    # different_colour_ways_number[row_length][tile_length - 2] counts the ways
    # to fill a row of `row_length` units using at least one tile of size
    # `tile_length` (2, 3 or 4 units).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
37
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase : Optional[int] = { """configuration_roberta_prelayernorm""": [ """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaPreLayerNormConfig""", """RobertaPreLayerNormOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = [ """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaPreLayerNormForCausalLM""", """RobertaPreLayerNormForMaskedLM""", """RobertaPreLayerNormForMultipleChoice""", """RobertaPreLayerNormForQuestionAnswering""", """RobertaPreLayerNormForSequenceClassification""", """RobertaPreLayerNormForTokenClassification""", """RobertaPreLayerNormModel""", """RobertaPreLayerNormPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[Any] = [ """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaPreLayerNormForCausalLM""", """TFRobertaPreLayerNormForMaskedLM""", """TFRobertaPreLayerNormForMultipleChoice""", """TFRobertaPreLayerNormForQuestionAnswering""", """TFRobertaPreLayerNormForSequenceClassification""", """TFRobertaPreLayerNormForTokenClassification""", """TFRobertaPreLayerNormMainLayer""", """TFRobertaPreLayerNormModel""", """TFRobertaPreLayerNormPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ """FlaxRobertaPreLayerNormForCausalLM""", """FlaxRobertaPreLayerNormForMaskedLM""", """FlaxRobertaPreLayerNormForMultipleChoice""", """FlaxRobertaPreLayerNormForQuestionAnswering""", """FlaxRobertaPreLayerNormForSequenceClassification""", """FlaxRobertaPreLayerNormForTokenClassification""", """FlaxRobertaPreLayerNormModel""", """FlaxRobertaPreLayerNormPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, 
FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
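# A minimal sketch of what the lazy structure above buys: importing the
# package stays cheap because the heavy torch/tf/flax modules are only
# loaded when one of the listed names is first touched. The class name is
# real; the usage is illustrative and assumes a transformers version that
# ships roberta-prelayernorm.
from transformers import RobertaPreLayerNormConfig  # no modeling backend loaded yet

config = RobertaPreLayerNormConfig()  # config-only, still lightweight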
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # sort by the chosen key (e.g. value) and take items while the weight budget lasts
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
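# Hypothetical menu demonstrating the greedy picker above: items are taken
# in descending order of the chosen key until the weight budget is spent.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
foods = build_menu(food, value, weight)
print(greedy(foods, 500, Things.get_value))  # all four fit -> total value 310.0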
def prefix_function(input_string: str) -> list:
    """
    KMP prefix (failure) function: prefix_result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also its suffix.
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
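# Worked example for the prefix function above:
# prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4], so the
# longest proper prefix that is also a suffix has length 4 ("aabc").
assert longest_prefix("aabcdaabc") == 4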
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
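# A minimal round-trip sketch for the reader/writer above. Dataset.from_sql
# and Dataset.to_sql are the public entry points that wrap these classes;
# the table name and sqlite path are hypothetical.
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["hello", "world"]})
ds.to_sql("examples", "sqlite:///demo.db")                   # writer path
loaded = Dataset.from_sql("examples", "sqlite:///demo.db")   # reader path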
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A__ ( A__ ): """simple docstring""" _lowercase = ['image_processor', 'tokenizer'] _lowercase = 'CLIPImageProcessor' _lowercase = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Tuple=None , **lowerCamelCase__ : Optional[Any] ): a__ : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowerCamelCase__ , ) a__ : List[Any] = kwargs.pop("feature_extractor" ) a__ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def __call__( self : List[Any] , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : str=None , **lowerCamelCase__ : Any ): if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: a__ : List[Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) if images is not None: a__ : str = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) if text is not None and images is not None: a__ : Any = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : Optional[Any] ): return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : Dict ): return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ ) @property def _UpperCamelCase( self : List[Any] ): a__ : List[str] = self.tokenizer.model_input_names a__ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _UpperCamelCase( self : Optional[Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase__ , ) return self.image_processor_class @property def _UpperCamelCase( self : List[str] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase__ , ) return self.image_processor
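# Typical use of the processor above (a sketch: the checkpoint is a real
# public CLIP model; the blank image just stands in for actual data).
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# inputs combines input_ids/attention_mask from the tokenizer with
# pixel_values from the image processor, ready to feed a CLIPModel.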
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year with Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
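# Sanity check against known Gregorian Easter dates (dates verifiable from
# any calendar; gauss_easter is the function defined above).
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2023) == datetime(2023, 4, 9)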
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase : Tuple = logging.get_logger(__name__) class A__ ( A__ ): """simple docstring""" _lowercase = ['input_features', 'attention_mask'] def __init__( self : str , lowerCamelCase__ : List[str]=80 , lowerCamelCase__ : Any=16_000 , lowerCamelCase__ : int=80 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Tuple=True , **lowerCamelCase__ : Optional[int] , ): super().__init__(feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , **lowerCamelCase__ ) a__ : Dict = num_mel_bins a__ : Optional[int] = do_ceptral_normalize a__ : List[str] = normalize_means a__ : Any = normalize_vars a__ : Optional[Any] = True def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : np.ndarray , ): a__ : Tuple = waveform * (2**15) # Kaldi compliance: 16-bit signed integers a__ : Union[str, Any] = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ) a__ : str = ta_kaldi.fbank(lowerCamelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _UpperCamelCase( lowerCamelCase__ : np.ndarray , lowerCamelCase__ : int , lowerCamelCase__ : Optional[bool] = True , lowerCamelCase__ : Optional[bool] = True , lowerCamelCase__ : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: a__ : List[str] = x[:input_length].mean(axis=0 ) a__ : Dict = np.subtract(lowerCamelCase__ , lowerCamelCase__ ) if normalize_vars: a__ : Any = x[:input_length].std(axis=0 ) a__ : List[str] = np.divide(lowerCamelCase__ , lowerCamelCase__ ) if input_length < x.shape[0]: a__ : str = padding_value # make sure array is in float32 a__ : List[Any] = x.astype(np.floataa ) return x def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[np.ndarray] , lowerCamelCase__ : Optional[np.ndarray] = None ): a__ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(lowerCamelCase__ , lowerCamelCase__ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(lowerCamelCase__ , lowerCamelCase__ ) ] def __call__( self : str , lowerCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , **lowerCamelCase__ : List[Any] , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) a__ : List[str] = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) a__ : Dict = is_batched_numpy or ( isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a__ : str = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ): a__ : Tuple = np.asarray(lowerCamelCase__ , dtype=np.floataa ) elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a__ : List[str] = [raw_speech] # extract fbank features a__ : Any = [self._extract_fbank_features(lowerCamelCase__ ) for waveform in raw_speech] # convert into correct format for padding a__ : Any = BatchFeature({"input_features": features} ) a__ : Any = self.pad( lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , ) # make sure list is in array format a__ : List[str] = padded_inputs.get("input_features" ) if isinstance(input_features[0] , lowerCamelCase__ ): a__ : Optional[int] = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_features] a__ : Dict = padded_inputs.get("attention_mask" ) if attention_mask is not None: a__ : Optional[int] = [np.asarray(lowerCamelCase__ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: a__ : Optional[Any] = ( np.array(lowerCamelCase__ , dtype=np.intaa ) if self._get_padding_strategies(lowerCamelCase__ , max_length=lowerCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD else None ) a__ : Optional[Any] = self.normalize( padded_inputs["input_features"] , attention_mask=lowerCamelCase__ ) if return_tensors is not None: a__ : Optional[Any] = padded_inputs.convert_to_tensors(lowerCamelCase__ ) return padded_inputs
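# Minimal use of the feature extractor above (a sketch: the checkpoint is
# the real facebook/s2t-small-librispeech-asr, the waveform is a dummy
# one-second 16 kHz signal).
import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor.from_pretrained(
    "facebook/s2t-small-librispeech-asr"
)
waveform = np.zeros(16_000, dtype=np.float32)
features = extractor(waveform, sampling_rate=16_000, return_tensors="pt")
# features.input_features has shape (1, num_frames, 80): log-mel filter
# banks with utterance-level CMVN applied, as implemented above.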
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
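# The loading pattern the tests above exercise, in isolation (a sketch:
# needs a CUDA GPU plus bitsandbytes/accelerate installed; the model id is
# the same bigscience/bloom-1b7 the test class uses).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=bnb_config, device_map="auto"
)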
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: find the least M such that the number of cuboids with
    an integer shortest surface path and longest side at most M exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size


if __name__ == "__main__":
    print(f"""{solution() = }""")
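# Brute-force cross-check for small sizes (a sketch; for a cuboid with
# sides a <= b <= c the shortest surface path is sqrt((a + b)**2 + c**2),
# which is exactly what the summed-sides trick above enumerates).
def count_cuboids(max_size: int) -> int:
    count = 0
    for c in range(1, max_size + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt((a + b) ** 2 + c**2).is_integer():
                    count += 1
    return count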
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
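# The inference recipe the integration tests above encode, stripped to its
# core (a sketch: the checkpoint is a real public BEiT model and the image
# path reuses the test fixture referenced above).
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # class 281: "tabby, tabby cat"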
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
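# How the special-token logic above plays out in practice (a sketch:
# roberta-base is a real checkpoint, the sentences are illustrative).
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tokenizer("Hello world", "Second segment")
# build_inputs_with_special_tokens wraps a pair as <s> A </s></s> B </s>,
# and create_token_type_ids_from_sequences returns all zeros, since
# RoBERTa does not use segment embeddings.
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))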
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Tuple = R"\w+[.]\d+" a__ : List[Any] = re.findall(__a , __a ) for pat in pats: a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) ) return key def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : List[str] = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): a__ : Any = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer a__ : List[str] = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer a__ : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": a__ : Tuple = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCamelCase_ ( __a , __a , __a=42 ) -> str: # Step 1: Convert pytorch tensor to numpy a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) ) a__ : Optional[Any] = flatten_dict(__a ) a__ : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): a__ : Optional[int] = rename_key(__a ) a__ : Optional[int] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown a__ : str = jnp.asarray(__a ) return unflatten_dict(__a )
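# What rename_key_and_reshape_tensor does to the two common weight layouts,
# shown on dummy arrays (a sketch of the layout conventions the converter
# above translates between).
import numpy as np

linear_w = np.zeros((8, 4))                 # PyTorch Linear: (out, in)
print(linear_w.T.shape)                     # Flax kernel: (in, out) -> (4, 8)

conv_w = np.zeros((16, 3, 3, 3))            # PyTorch Conv2d: (O, I, H, W)
print(conv_w.transpose(2, 3, 1, 0).shape)   # Flax kernel: (H, W, I, O) -> (3, 3, 3, 16)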
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
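The token-level F1 used above is just a bag-of-tokens overlap between the prediction and a gold answer. A minimal standalone sketch of the same computation, standard library only, with hypothetical example strings:

import collections


def f1(prediction_tokens, gold_tokens):
    # Bag-of-tokens overlap, as in the SQuAD 2.0 metric above.
    common = collections.Counter(prediction_tokens) & collections.Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(prediction_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)


# Hypothetical example: 2 of 3 predicted tokens match the 2 gold tokens.
print(f1(["the", "eiffel", "tower"], ["eiffel", "tower"]))  # 0.8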
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
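The same benchmark can be driven programmatically instead of via the CLI. A hedged sketch, assuming the standard fields on transformers' benchmark arguments (models, batch_sizes, sequence_lengths, inference, training); the model identifier and tiny sizes are illustrative choices to keep the run cheap:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["gpt2"], batch_sizes=[1], sequence_lengths=[8], inference=True, training=False
)
results = TensorFlowBenchmark(args=args).run()
print(results)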
37
1
from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart,
    barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird,
    bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom,
    bridgetower, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, codegen,
    conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec,
    deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta,
    detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet,
    electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet,
    focalnet, fsmt, funnel, git, glpn, gpt2, gpt_bigcode, gpt_neo, gpt_neox,
    gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer, groupvit, herbert,
    hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmv2,
    layoutlmv3, layoutxlm, led, levit, lilt, llama, longformer, longt5, luke, lxmert,
    m2m_100, marian, markuplm, mask2former, maskformer, mbart, mbart50, mega,
    megatron_bert, megatron_gpt2, mgp_str, mluke, mobilebert, mobilenet_v1, mobilenet_v2,
    mobilevit, mobilevitv2, mpnet, mra, mt5, musicgen, mvp, nat, nezha, nllb, nllb_moe,
    nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x,
    perceiver, phobert, pix2struct, plbart, poolformer, prophetnet, qdqbert, rag, realm,
    reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer,
    rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text,
    speech_to_text_2, speecht5, splinter, squeezebert, swiftformer, swin, swin2sr, swinv2,
    switch_transformers, t5, table_transformer, tapas, time_series_transformer,
    timesformer, timm_backbone, transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat,
    upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert,
    vit, vit_hybrid, vit_mae, vit_msn, vivit, wav2vec2, wav2vec2_conformer,
    wav2vec2_phoneme, wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet,
    xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso,
)
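Every name above is a submodule of transformers.models, so any of them can also be resolved dynamically. A small sketch using only the standard library; "bert" is one entry from the list:

import importlib

# Resolve one of the submodules listed above by name.
module_name = "bert"
module = importlib.import_module(f"transformers.models.{module_name}")
print(module.__name__)  # transformers.models.bert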
37
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
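The max-over-references pattern in metric_max_over_ground_truths above is worth seeing in isolation; a self-contained sketch with a toy exact-match metric and hypothetical strings:

def exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score against every reference and keep the best one.
    return max(metric_fn(prediction, gt) for gt in ground_truths)


print(metric_max_over_ground_truths(exact_match, "Paris", ["paris", "Paris, France"]))  # 1.0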
37
1
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
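A hedged usage sketch for the layer above. It assumes the class is exported as transformers.TFGPT2Tokenizer (its name in the transformers source), that keras_nlp and tensorflow_text are installed, and that the "gpt2" vocabulary can be downloaded:

import tensorflow as tf

from transformers import TFGPT2Tokenizer

# Build the in-graph tokenizer and pad every sequence to a fixed length of 8.
tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2", max_length=8, pad_token_id=0)
batch = tokenizer(tf.constant(["hello world"]))
print(batch["input_ids"].shape)  # (1, 8) after padding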
37
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

        results.update(result)

    return results


if __name__ == "__main__":
    main()
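get_tfds() above loads plain CSV files and treats one column (selected by label_column_id) as the label and the remaining one or two columns as text. A hedged sketch of a minimal matching file; the column names and rows are illustrative:

import csv

# Write a toy training file in the shape get_tfds() expects:
# one label column plus one text column.
with open("train.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["label", "sentence"])
    writer.writerow(["positive", "a great movie"])
    writer.writerow(["negative", "a dull movie"])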
37
1
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
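The expected ids in test_full_tokenizer follow directly from the toy vocabulary's ordering; a standalone sketch of that mapping, with the token list copied from the setUp above:

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>",
         "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider",
         "<unk>", "<|startoftext|>", "<|endoftext|>"]
token_to_id = {tok: i for i, tok in enumerate(vocab)}

# The test asserts that "lower newer" tokenizes to these pieces:
tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
print([token_to_id[t] for t in tokens])  # [10, 2, 16, 9, 3, 2, 16]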
37
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
37
1
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
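The seq_length the tester asserts on follows from simple patch arithmetic; a standalone sketch using the tester defaults above (patch_size=2, num_mel_bins=16, max_length=24, both strides 2):

# Patch-count arithmetic from the ASTModelTester above.
patch_size, num_mel_bins, max_length = 2, 16, 24
frequency_stride = time_stride = 2

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1              # 12
seq_length = frequency_out * time_out + 2  # +2 for [CLS] and distillation tokens
print(seq_length)  # 98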
37
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
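# --- A minimal standalone sketch of what setUp and the first tokenization test
# above exercise. The tiny vocab/merges are copied from the fixtures; the
# temporary-directory plumbing is an assumption of this sketch, not part of the
# test suite.
if __name__ == "__main__":
    import json
    import os
    import tempfile

    from transformers import CLIPTokenizer

    vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
    merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]

    with tempfile.TemporaryDirectory() as tmpdir:
        vocab_file = os.path.join(tmpdir, "vocab.json")
        merges_file = os.path.join(tmpdir, "merges.txt")
        with open(vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))) + "\n")
        with open(merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
        # the BPE merges split "lower newer" exactly as the test expects:
        print(tokenizer.tokenize("lower newer"))  # ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']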
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device UpperCamelCase : Any = False class A__ ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : str ): a__ : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Tuple = "A painting of a squirrel eating a burger " a__ : Any = torch.manual_seed(0 ) a__ : int = pipe( prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase__ ) a__ : int = VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Optional[int] = generator.manual_seed(0 ) a__ : Optional[int] = pipe( prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _UpperCamelCase( self : Tuple ): a__ : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Optional[Any] = "A painting of a squirrel eating a burger " a__ : Dict = torch.manual_seed(0 ) a__ : int = pipe( prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images a__ : Union[str, Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) a__ : List[Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
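# --- To make the regex table above concrete: a hedged replay of a few
# TO_CONVERT rules (copied from the tuple at the top of this file) on one
# representative line of TFDS code. Order matters: the specific "tfds\.core"
# rule must fire before the catch-all "tfds\.".
if __name__ == "__main__":
    import re

    to_convert = [
        (r"tfds\.core", r"datasets"),
        (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
        (r"tfds\.", r"datasets."),
    ]
    line = "class Squad(tfds.core.GeneratorBasedBuilder): answer = tfds.features.Text()"
    for pattern, replacement in to_convert:
        line = re.sub(pattern, replacement, line)
    print(line)
    # class Squad(datasets.GeneratorBasedBuilder): answer = datasets.Value('string')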
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase : int = logging.get_logger(__name__) class A__ ( A__ ): """simple docstring""" _lowercase = ['pixel_values'] def __init__( self : int , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : int = 0.9 , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : Union[int, float] = 1 / 255 , lowerCamelCase__ : bool = True , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , **lowerCamelCase__ : Any , ): super().__init__(**lowerCamelCase__ ) a__ : str = size if size is not None else {"shortest_edge": 224} a__ : str = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) a__ : Dict = crop_size if crop_size is not None else {"height": 224, "width": 224} a__ : List[Any] = get_size_dict(lowerCamelCase__ , param_name="crop_size" ) a__ : Optional[Any] = do_resize a__ : str = size a__ : List[Any] = crop_pct a__ : Tuple = resample a__ : int = do_center_crop a__ : Union[str, Any] = crop_size a__ : Any = do_rescale a__ : Optional[int] = rescale_factor a__ : Optional[Any] = do_normalize a__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN a__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _UpperCamelCase( self : Dict , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[float] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : str , ): a__ : Optional[int] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) if crop_pct is not None: if "shortest_edge" in size: a__ : Dict = int(size["shortest_edge"] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: a__ : Optional[Any] = int(size["height"] / crop_pct ) else: a__ : List[str] = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct )) else: raise ValueError("Invalid size for resize: {}".format(lowerCamelCase__ ) ) a__ : Tuple = get_resize_output_image_size(lowerCamelCase__ , size=lowerCamelCase__ , default_to_square=lowerCamelCase__ ) else: if "shortest_edge" in size: a__ : int = get_resize_output_image_size(lowerCamelCase__ , size=size["shortest_edge"] , default_to_square=lowerCamelCase__ ) elif "height" in size and "width" in size: a__ : Optional[int] = (size["height"], size["width"]) else: raise ValueError("Invalid size for resize: {}".format(lowerCamelCase__ ) ) return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : List[str] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Any , ): a__ : List[Any] = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(lowerCamelCase__ , size=(size["height"], size["width"]) , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : int , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Tuple , ): return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Any , ): return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : int = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : str , ): a__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize a__ : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct a__ : int = resample if resample is not None else self.resample a__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop a__ : int = do_rescale if do_rescale is not None else self.do_rescale a__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor a__ : int = do_normalize if do_normalize is not None else self.do_normalize a__ : Union[str, Any] = image_mean if image_mean is not None 
else self.image_mean a__ : List[Any] = image_std if image_std is not None else self.image_std a__ : Tuple = size if size is not None else self.size a__ : Union[str, Any] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) a__ : Optional[int] = crop_size if crop_size is not None else self.crop_size a__ : List[str] = get_size_dict(lowerCamelCase__ , param_name="crop_size" ) a__ : Union[str, Any] = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_pct is None: raise ValueError("Crop_pct must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. a__ : Optional[int] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: a__ : Any = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , crop_pct=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images] if do_center_crop: a__ : Tuple = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images] if do_rescale: a__ : List[Any] = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images] if do_normalize: a__ : Optional[int] = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images] a__ : Tuple = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images] a__ : Optional[Any] = {"pixel_values": images} return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
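# --- A quick sanity check on the crop_pct arithmetic in resize() above,
# replayed with this processor's defaults ({"shortest_edge": 224} and
# crop_pct 0.9 from __init__): the shorter side is first scaled up by
# 1 / crop_pct, then the 224x224 center crop is taken.
if __name__ == "__main__":
    size, crop_pct = 224, 0.9
    scale_size = int(size / crop_pct)
    print(scale_size)  # 248 -- resize the shorter edge to 248, then center-crop 224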
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
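# --- For orientation, a small sketch of the machinery the archive filesystems
# above delegate to: fsspec.open with an explicit compression argument. The
# file name is made up for the example.
if __name__ == "__main__":
    import gzip

    import fsspec

    with gzip.open("example.txt.gz", "wt") as f:
        f.write("hello")

    with fsspec.open("example.txt.gz", mode="rb", compression="gzip") as f:
        print(f.read())  # b'hello'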
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root is guaranteed in [a, b] only if f(a) and f(b) differ in sign
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(a) * equation(c) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
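# --- Since the loop halves the bracket on every pass, the 0.01 tolerance bounds
# the iteration count; a quick check of that bound for the bisection(-2, 5)
# call above:
if __name__ == "__main__":
    from math import ceil, log2

    # width shrinks by half per pass, so at most ceil(log2((b - a) / 0.01)) passes
    print(ceil(log2((5 - (-2)) / 0.01)))  # 10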
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
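# --- A small replay of to_tf_var_name above on one typical state-dict key, to
# show why the order of the var_map entries matters (only the rules that fire
# for this key are kept, in their original order):
if __name__ == "__main__":
    demo_map = (
        ("layer.", "layer_"),
        (".", "/"),
        ("weight", "kernel"),
    )
    name = "encoder.layer.0.attention.self.query.weight"
    for patt, repl in demo_map:
        name = name.replace(patt, repl)
    print(f"bert/{name}")  # bert/encoder/layer_0/attention/self/query/kernel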
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class A__ ( A__ ): """simple docstring""" _lowercase = 42 _lowercase = 42 def __init__( self : str , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : KarrasVeScheduler ): super().__init__() self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) @torch.no_grad() def __call__( self : Dict , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , **lowerCamelCase__ : Optional[int] , ): a__ : int = self.unet.config.sample_size a__ : Any = (batch_size, 3, img_size, img_size) a__ : List[Any] = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) a__ : List[Any] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(lowerCamelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper a__ : Dict = self.scheduler.schedule[t] a__ : List[Any] = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat a__, a__ : List[str] = self.scheduler.add_noise_to_input(lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. a__ : Optional[int] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev a__ : Any = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. a__ : str = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample a__ : Optional[int] = self.scheduler.step_correct( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , step_output.prev_sample , step_output["derivative"] , ) a__ : str = step_output.prev_sample a__ : int = (sample / 2 + 0.5).clamp(0 , 1 ) a__ : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a__ : str = self.numpy_to_pil(lowerCamelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase__ )
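# --- A hedged usage sketch for the pipeline above. The checkpoint id is an
# assumption for illustration (any unconditional UNet2DModel checkpoint paired
# with a KarrasVeScheduler should work); generation is slow without a GPU.
if __name__ == "__main__":
    import torch

    from diffusers import KarrasVePipeline

    pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")  # assumed checkpoint
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")

    generator = torch.manual_seed(0)
    image = pipe(batch_size=1, num_inference_steps=50, generator=generator).images[0]
    image.save("karras_ve_sample.png")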
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
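# --- The model tester above derives the sequence length from the spectrogram
# and stride sizes. Replaying that arithmetic with the values the released
# audioset checkpoint is commonly described with (128 mel bins, 1024 frames,
# 16x16 patches, stride 10 -- these numbers are assumed from the AST paper,
# not taken from this file):
if __name__ == "__main__":
    num_mel_bins, max_length, patch_size, stride = 128, 1024, 16, 10
    frequency_out = (num_mel_bins - patch_size) // stride + 1  # 12
    time_out = (max_length - patch_size) // stride + 1         # 101
    print(frequency_out * time_out + 2)  # 1214 tokens incl. [CLS] and distillation token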
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
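# --- Sanity check on the expected tree above: a spanning tree of the 9 nodes
# must have exactly 8 edges, and these sum to a total weight of 37.
if __name__ == "__main__":
    expected = [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]]
    assert len(expected) == 9 - 1  # |V| - 1 edges in any spanning tree
    print(sum(cost for *_, cost in expected))  # 37, the minimum total weight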
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
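# --- Replaying the first slow test above outside the harness (this downloads
# the real sentencepiece model; the expected ids are copied from the test):
if __name__ == "__main__":
    from transformers import XGLMTokenizer

    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    print(tokenizer.encode("Hello World!"))  # [2, 31227, 4447, 35], as asserted above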
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
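# --- One caveat worth making explicit: the converging scan assumes nums is
# sorted ascending; on unsorted input a valid pair can be walked past.
if __name__ == "__main__":
    # (2, 7) sums to the target, but the pointers never meet on that pair
    # because the list is unsorted -- sort first, or use a hash map instead
    print(two_pointer([11, 2, 15, 7], 9))  # []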
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
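# --- Example invocation of the converter above. The script filename is an
# assumption; --model_name must be one of the four choices registered with
# argparse, and --pytorch_dump_folder_path / --push_to_hub are optional:
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl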
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : int=3 , lowerCamelCase__ : Dict=18 , lowerCamelCase__ : List[str]=30 , lowerCamelCase__ : int=400 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : List[Any]=True , ): a__ : Dict = size if size is not None else {"height": 18, "width": 18} a__ : Union[str, Any] = parent a__ : Dict = batch_size a__ : Union[str, Any] = num_channels a__ : Optional[int] = image_size a__ : Tuple = min_resolution a__ : Dict = max_resolution a__ : Tuple = do_resize a__ : int = size a__ : List[str] = apply_ocr def _UpperCamelCase( self : List[Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _UpperCamelCase( self : Dict ): a__ : Any = LayoutLMvaImageProcessingTester(self ) @property def _UpperCamelCase( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase( self : Optional[int] ): a__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "size" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "apply_ocr" ) ) def _UpperCamelCase( self : Any ): a__ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) a__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): # Initialize image_processing a__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input a__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , lowerCamelCase__ ) self.assertIsInstance(encoding.boxes , lowerCamelCase__ ) # Test batched a__ : Union[str, Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase( self : int ): # Initialize image_processing a__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors 
a__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input a__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : Union[str, Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase( self : Optional[Any] ): # Initialize image_processing a__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input a__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : int = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase( self : Union[str, Any] ): # with apply_OCR = True a__ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset a__ : int = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) a__ : List[str] = Image.open(ds[0]["file"] ).convert("RGB" ) a__ : Any = image_processing(lowerCamelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 a__ : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", 
"Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 a__ : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 
614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , lowerCamelCase__ ) self.assertListEqual(encoding.boxes , lowerCamelCase__ ) # with apply_OCR = False a__ : List[str] = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) a__ : List[Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
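# A minimal usage sketch of the image processor exercised above, following this
# file's import name; the input path is a hypothetical example, and pytesseract
# must be installed for the default apply_ocr=True.
from PIL import Image
from transformers import LayoutLMvaImageProcessor

image_processor = LayoutLMvaImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")  # hypothetical input image
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224]) at the default size
print(len(encoding.words[0]), len(encoding.boxes[0]))  # one OCR word list and box list per image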
37
def binomial_coefficient(n: int, r: int) -> int:
    # Compute C(n, r) by building one row of Pascal's triangle at a time.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
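# A quick sanity check of the row-by-row construction against the standard
# library's closed form (math.comb, available since Python 3.8).
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252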
37
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : str = { """xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""", """xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""", """xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""", """xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""", """xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""", """xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""", """xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""", """xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""", """xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""", """xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""", } class A__ ( A__ ): """simple docstring""" _lowercase = 'xlm' _lowercase = { 'hidden_size': 'emb_dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers', 'n_words': 'vocab_size', # For backward compatibility } def __init__( self : Tuple , lowerCamelCase__ : List[Any]=30_145 , lowerCamelCase__ : Any=2_048 , lowerCamelCase__ : int=12 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : List[str]=1 , lowerCamelCase__ : str=True , lowerCamelCase__ : str=512 , lowerCamelCase__ : List[str]=2_048**-0.5 , lowerCamelCase__ : List[str]=1E-12 , lowerCamelCase__ : Any=0.02 , lowerCamelCase__ : Tuple=0 , lowerCamelCase__ : Any=1 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Optional[Any]=5 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[Any]="first" , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : int=True , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Tuple=5 , lowerCamelCase__ : Any=5 , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Tuple=0 , **lowerCamelCase__ : List[str] , ): a__ : List[str] = vocab_size a__ : str = emb_dim a__ : List[str] = n_layers a__ : Union[str, Any] = n_heads a__ : Any = dropout a__ : int = attention_dropout a__ : List[str] = gelu_activation a__ : Optional[Any] = sinusoidal_embeddings a__ : Optional[int] = causal a__ : Optional[int] = asm a__ : int = n_langs a__ : Tuple = use_lang_emb a__ : str = layer_norm_eps a__ : Tuple = bos_index a__ : int = eos_index a__ : Optional[int] = pad_index a__ : List[str] = unk_index a__ : List[Any] = mask_index a__ : List[str] = is_encoder a__ : List[Any] = max_position_embeddings a__ : Union[str, Any] = embed_init_std a__ : Tuple = init_std a__ : List[str] = summary_type a__ : List[str] = summary_use_proj a__ : Dict = summary_activation a__ : str = summary_proj_to_labels a__ : Dict = summary_first_dropout a__ : List[str] = start_n_top a__ : List[Any] = end_n_top a__ : Dict = mask_token_id a__ : Optional[Any] = 
lang_id if "n_words" in kwargs: a__ : Tuple = kwargs["n_words"] super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , **lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" @property def _UpperCamelCase( self : Dict ): if self.task == "multiple-choice": a__ : str = {0: "batch", 1: "choice", 2: "sequence"} else: a__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
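# A minimal sketch of instantiating the configuration defined above; the
# hyperparameters are arbitrary example values, not recommended settings.
from transformers import XLMConfig, XLMModel

config = XLMConfig(emb_dim=256, n_layers=4, n_heads=4)
model = XLMModel(config)  # randomly initialised weights
print(model.config.hidden_size)  # 256 -- the attribute_map resolves hidden_size to emb_dim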
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
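# A hedged sketch of the `_pad` behaviour above: when a `global_attention_mask`
# is present, padded positions are filled with -1 (local attention), not 0.
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
encoding = tokenizer("a short document")
encoding["global_attention_mask"] = [1] + [0] * (len(encoding["input_ids"]) - 1)  # global attention on <s>
padded = tokenizer.pad(encoding, padding="max_length", max_length=16)
print(padded["global_attention_mask"])  # trailing slots are -1, matching the branch above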
37
1
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] ): self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) ) for a, b in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assertAlmostEqual(lowerCamelCase__ , lowerCamelCase__ , delta=lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : List[str] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(lowerCamelCase__ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def _UpperCamelCase( self : List[Any] ): a__ : Tuple = None ops.enable_eager_execution_internal() a__ : Dict = tf.config.list_physical_devices("CPU" ) if len(lowerCamelCase__ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) a__ : Optional[int] = tf.config.list_logical_devices(device_type="CPU" ) a__ : Tuple = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): a__ : List[Any] = GradientAccumulator() a__ : str = tf.Variable([4.0, 3.0] ) a__, a__ : Union[str, Any] = create_optimizer(5E-5 , 10 , 5 ) a__ : Optional[int] = tf.Variable([0.0, 0.0] , trainable=lowerCamelCase__ ) def accumulate_on_replica(lowerCamelCase__ : Any ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ): with strategy.scope(): a__ : Tuple = strategy.experimental_local_results(lowerCamelCase__ ) local_variables[0].assign(lowerCamelCase__ ) local_variables[1].assign(lowerCamelCase__ ) strategy.run(lowerCamelCase__ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(lowerCamelCase__ ) def _check_local_values(lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ): a__ : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , lowerCamelCase__ , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , lowerCamelCase__ , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
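# A minimal sketch of the accumulation pattern the test above exercises,
# assuming TensorFlow is installed.
import tensorflow as tf
from transformers import GradientAccumulator

accumulator = GradientAccumulator()
for grad in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    accumulator([tf.constant(grad)])
print(accumulator.step)                  # 3 accumulation steps
print(accumulator.gradients[0].numpy())  # [-2.  5.], the elementwise sum
accumulator.reset()                      # step back to 0, gradients zeroed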
37
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
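# A small sketch of build_inputs_with_special_tokens above: a single sequence is
# wrapped as <s> ... </s>, and a pair is joined with two </s> tokens. Exact
# subword splits depend on the BPE vocabulary, so the token strings are indicative.
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
ids = tokenizer("hello", "world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # <s> ... </s> </s> ... </s>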
37
1
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    # The vector from end_point1 to end_point2.
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    # Round each component before comparing, to absorb floating-point noise.
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    # Three points are collinear iff the cross product of AB and AC is zero.
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
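# Example: points on the line x = y = z are collinear (zero cross product);
# the corners of a unit triangle are not.
print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False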
37
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    # Min-max normalization: rescale data to the range [0, 1].
    x_min = min(data)
    x_max = max(data)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    # Z-score standardization: rescale data to zero mean and unit variance.
    mu = mean(data)
    sigma = stdev(data)
    return [round((x - mu) / sigma, ndigits) for x in data]
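# Example run on a small sample.
data = [2.0, 4.0, 6.0, 8.0]
print(normalization(data))    # [0.0, 0.333, 0.667, 1.0]
print(standardization(data))  # [-1.162, -0.387, 0.387, 1.162] (sample stdev)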
37
1
def hexagonal_numbers(length: int) -> list[int]:
    # The n-th hexagonal number is n * (2n - 1).
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
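# The first hexagonal numbers n * (2n - 1) for n = 0..4.
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]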
37
def solution(length: int = 50) -> int:
    # Project Euler 116: count the ways to replace tiles in a row of `length`
    # grey squares with red (2), green (3) or blue (4) tiles, colours not mixed.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"""{solution() = }""")
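# Sanity check from the Project Euler 116 statement: a row of five grey squares
# admits 7 red + 3 green + 2 blue = 12 tilings.
assert solution(length=5) == 12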
37
1
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef UpperCamelCase : int = ( """This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" ) def UpperCamelCase_ ( __a , __a ) -> str: warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def UpperCamelCase_ ( __a , __a ) -> List[Any]: warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) a__ : Optional[Any] = simple_accuracy(__a , __a ) a__ : Any = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def UpperCamelCase_ ( __a , __a ) -> str: warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) a__ : Union[str, Any] = pearsonr(__a , __a )[0] a__ : int = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def UpperCamelCase_ ( __a , __a , __a ) -> Tuple: warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), f'''Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}''' if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def UpperCamelCase_ ( __a , __a , __a ) -> Tuple: warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(f'''Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}''' ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
37
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Take items in descending key order while the weight budget allows.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
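# Example: build a menu and greedily fill a 25-unit weight budget by value.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 5]
foods = build_menu(food, value, weight)
chosen, total_value = greedy(foods, 25, Things.get_value)
print(total_value)  # 170.0 -- Pizza (weight 10) and Rice (weight 5) fit the budget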
37
1
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem UpperCamelCase : List[Any] = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 UpperCamelCase : List[compression.BaseCompressedFileFileSystem] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def UpperCamelCase_ ( __a ) -> str: if "://" in dataset_path: a__ : Optional[int] = dataset_path.split("://" )[1] return dataset_path def UpperCamelCase_ ( __a ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = not is_remote_filesystem(__a ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__a ) , fs._strip_protocol(__a ) ) else: fs.mv(__a , __a , recursive=__a ) def UpperCamelCase_ ( ) -> None: if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: a__ : List[str] = None a__ : Any = None a__ : List[Any] = threading.Lock()
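# The path-extraction helper above simply drops the URI scheme. A standalone
# sketch follows (the readable name here is this example's assumption, since the
# helpers above are anonymised):
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path

print(extract_path_from_uri("s3://my-bucket/data"))  # my-bucket/data
print(extract_path_from_uri("local/data"))           # unchanged: no scheme present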
37
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
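# A hedged sketch of the writer above through the public Dataset.to_sql API,
# targeting an in-memory SQLite database.
import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect(":memory:")
written = ds.to_sql("my_table", con)  # returns the number of rows written
print(written)  # 2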
37
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A__ : """simple docstring""" @staticmethod def _UpperCamelCase( *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ): pass @is_pipeline_test @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @require_torch def _UpperCamelCase( self : Optional[int] ): a__ : int = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a__ : Optional[int] = image_classifier(lowerCamelCase__ , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(lowerCamelCase__ ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) a__ : Tuple = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , [ [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], ] , ) @require_tf def _UpperCamelCase( self : Optional[int] ): a__ : List[str] = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) a__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a__ : str = image_classifier(lowerCamelCase__ , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) a__ : str = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , [ [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], [ {"score": 0.333, "label": 
ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, {"score": 0.333, "label": ANY(lowerCamelCase__ )}, ], ] , ) @slow @require_torch def _UpperCamelCase( self : str ): a__ : List[Any] = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes a__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a__ : Any = image_classifier(lowerCamelCase__ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) a__ : int = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def _UpperCamelCase( self : Optional[int] ): a__ : Dict = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes a__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a__ : int = image_classifier(lowerCamelCase__ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) a__ : List[Any] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
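# A minimal sketch of the pipeline under test; the image path is a hypothetical
# example, and the checkpoint download needs network access.
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])  # hypothetical image
print(predictions[0]["label"], predictions[0]["score"])  # best label first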
37
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
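# Reference dates for a quick check: western Easter fell on 23 April 2000
# and 9 April 2023.
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2023) == datetime(2023, 4, 9)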
37
1
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
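# A minimal usage sketch of the 4-bit loading path exercised by the tests above.
# It assumes a CUDA device; the `bigscience/bloom-560m` checkpoint is purely
# illustrative, any causal LM on the Hub works the same way.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=quantization_config, device_map="auto"
)
encoded_input = tokenizer("Hello my name is", return_tensors="pt")
output = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))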
from __future__ import annotations

from collections import namedtuple


def UpperCamelCase_ ( __a , __a , __a ) -> tuple:
    a__ : Union[str, Any] = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("Exactly one argument must be 0" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
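# Worked examples for the solver above (shown as comments because this file's
# naming convention mangles the signature; argument order is voltage, current,
# power, and exactly one of the three must be 0):
#   voltage=0,  current=2, power=4  -> result('voltage', 2.0)  since V = P / I = 4 / 2
#   voltage=2,  current=2, power=0  -> result('power',   4.0)  since P = |V * I| = |2 * 2|
#   voltage=-2, current=3, power=0  -> result('power',   6.0)  since P = |V * I| = |-2 * 3|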
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
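# A minimal, self-contained inference sketch mirroring the integration tests
# above; it assumes the public `microsoft/beit-base-patch16-224` checkpoint and
# the COCO fixture image the tests use (class id 281 is ImageNet's tabby cat).
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(-1).item())  # the tests above expect 281 for this image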
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


UpperCamelCase : List[str] = logging.get_logger(__name__)

UpperCamelCase : List[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

UpperCamelCase : Union[str, Any] = {
    """vocab_file""": {
        """google/realm-cc-news-pretrained-embedder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
        ),
        """google/realm-cc-news-pretrained-encoder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
        ),
        """google/realm-cc-news-pretrained-scorer""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
        ),
        """google/realm-cc-news-pretrained-openqa""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"""
        ),
        """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
        """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
        """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
        """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """google/realm-cc-news-pretrained-embedder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"""
        ),
        """google/realm-cc-news-pretrained-encoder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
        ),
        """google/realm-cc-news-pretrained-scorer""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
        ),
        """google/realm-cc-news-pretrained-openqa""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-nq-openqa""": (
            """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-nq-reader""": (
            """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-wq-openqa""": (
            """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-wq-reader""": (
            """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
        ),
    },
}

UpperCamelCase : Optional[int] = {
    """google/realm-cc-news-pretrained-embedder""": 512,
    """google/realm-cc-news-pretrained-encoder""": 512,
    """google/realm-cc-news-pretrained-scorer""": 512,
    """google/realm-cc-news-pretrained-openqa""": 512,
    """google/realm-orqa-nq-openqa""": 512,
    """google/realm-orqa-nq-reader""": 512,
    """google/realm-orqa-wq-openqa""": 512,
    """google/realm-orqa-wq-reader""": 512,
}

UpperCamelCase : List[str] = {
    """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
    """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
    """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
    """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
    """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
    """google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
    """google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
    """google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}


class A__ ( A__ ):
    """simple docstring"""

    _lowercase = VOCAB_FILES_NAMES
    _lowercase = PRETRAINED_VOCAB_FILES_MAP
    _lowercase = PRETRAINED_INIT_CONFIGURATION
    _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase = RealmTokenizer

    def __init__( self : str , lowerCamelCase__ : str=None , lowerCamelCase__ : int=None , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any="[UNK]" , lowerCamelCase__ : Optional[int]="[SEP]" , lowerCamelCase__ : Optional[Any]="[PAD]" , lowerCamelCase__ : Optional[int]="[CLS]" , lowerCamelCase__ : Tuple="[MASK]" , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : str , ):
        super().__init__(
            lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
        a__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , lowerCamelCase__ ) != do_lower_case
            or normalizer_state.get("strip_accents" , lowerCamelCase__ ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , lowerCamelCase__ ) != tokenize_chinese_chars
        ):
            a__ : Optional[Any] = getattr(lowerCamelCase__ , normalizer_state.pop("type" ) )
            a__ : List[str] = do_lower_case
            a__ : List[Any] = strip_accents
            a__ : Optional[Any] = tokenize_chinese_chars
            a__ : Optional[int] = normalizer_class(**lowerCamelCase__ )
        a__ : Union[str, Any] = do_lower_case

    def _UpperCamelCase( self : Any , lowerCamelCase__ : int , **lowerCamelCase__ : int ):
        a__ : List[str] = PaddingStrategy.MAX_LENGTH
        a__ : Dict = text
        a__ : Optional[int] = kwargs.pop("text_pair" , lowerCamelCase__ )
        a__ : Dict = kwargs.pop("return_tensors" , lowerCamelCase__ )
        a__ : List[Any] = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(lowerCamelCase__ ):
            if batch_text_pair is not None:
                a__ : Dict = batch_text_pair[idx]
            else:
                a__ : Union[str, Any] = None
            a__ : int = super().__call__(lowerCamelCase__ , lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
            a__ : Union[str, Any] = encoded_candidates.get("input_ids" )
            a__ : List[Any] = encoded_candidates.get("attention_mask" )
            a__ : int = encoded_candidates.get("token_type_ids" )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(lowerCamelCase__ )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(lowerCamelCase__ )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(lowerCamelCase__ )
        a__ : Union[str, Any] = {key: item for key, item in output_data.items() if len(lowerCamelCase__ ) != 0}
        return BatchEncoding(lowerCamelCase__ , tensor_type=lowerCamelCase__ )

    def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str=None ):
        a__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
        a__ : List[Any] = [self.sep_token_id]
        a__ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
        a__ : Tuple = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
        return tuple(lowerCamelCase__ )
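# A usage sketch for the candidate-batching entry point above (upstream this
# method is named `batch_encode_candidates`; this file mangles method names).
# Every candidate is padded to `max_length`, so the batch stacks into tensors
# of shape (num_examples, num_candidates, max_length).
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
candidates = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # torch.Size([2, 2, 10])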
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


UpperCamelCase : Dict = logging.get_logger(__name__)


def UpperCamelCase_ ( __a ) -> Union[str, Any]:
    a__ : Tuple = R"\w+[.]\d+"
    a__ : List[Any] = re.findall(__a , __a )
    for pat in pats:
        a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) )
    return key


def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
    a__ : List[str] = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        a__ : Any = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    a__ : List[str] = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    a__ : Tuple = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        a__ : Tuple = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def UpperCamelCase_ ( __a , __a , __a=42 ) -> str:
    # Step 1: Convert pytorch tensor to numpy
    a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) )

    a__ : Optional[Any] = flatten_dict(__a )
    a__ : Union[str, Any] = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        a__ : Optional[int] = rename_key(__a )
        a__ : Optional[int] = tuple(renamed_pt_key.split("." ) )

        # Correctly rename weight parameters
        a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

        # also add unexpected weight so that warning is thrown
        a__ : str = jnp.asarray(__a )

    return unflatten_dict(__a )
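# A quick demonstration of the renaming rule implemented above: the pattern
# \w+[.]\d+ turns numbered PyTorch sub-module keys ("layers.0") into Flax-style
# underscore keys ("layers_0") while the trailing parameter name is kept.
import re

key = "encoder.layers.0.self_attn.weight"
for pat in re.findall(r"\w+[.]\d+", key):
    key = key.replace(pat, "_".join(pat.split(".")))
print(key)  # encoder.layers_0.self_attn.weight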
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class A__ ( A__ ):
    """simple docstring"""

    _lowercase = 42
    _lowercase = 42
    _lowercase = None


class A__ ( A__ , A__ ):
    """simple docstring"""

    _lowercase = 2

    @register_to_config
    def __init__( self : List[Any] , lowerCamelCase__ : float = 0.02 , lowerCamelCase__ : float = 100 , lowerCamelCase__ : float = 1.007 , lowerCamelCase__ : float = 80 , lowerCamelCase__ : float = 0.05 , lowerCamelCase__ : float = 50 , ):
        # standard deviation of the initial noise distribution
        a__ : Union[str, Any] = sigma_max

        # setable values
        a__ : int = None
        a__ : np.IntTensor = None
        a__ : torch.FloatTensor = None  # sigma(t_i)

    def _UpperCamelCase( self : Dict , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : Optional[int] = None ):
        return sample

    def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, torch.device] = None ):
        a__ : int = num_inference_steps
        a__ : Optional[int] = np.arange(0 , self.num_inference_steps )[::-1].copy()
        a__ : List[Any] = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
        a__ : Optional[int] = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        a__ : Dict = torch.tensor(lowerCamelCase__ , dtype=torch.floataa , device=lowerCamelCase__ )

    def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : float , lowerCamelCase__ : Optional[torch.Generator] = None ):
        if self.config.s_min <= sigma <= self.config.s_max:
            a__ : Tuple = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            a__ : Optional[int] = 0

        # sample eps ~ N(0, S_noise^2 * I)
        a__ : Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCamelCase__ ).to(sample.device )
        a__ : str = sigma + gamma * sigma
        a__ : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def _UpperCamelCase( self : Dict , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : bool = True , ):
        a__ : str = sample_hat + sigma_hat * model_output
        a__ : str = (sample_hat - pred_original_sample) / sigma_hat
        a__ : Dict = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=lowerCamelCase__ , derivative=lowerCamelCase__ , pred_original_sample=lowerCamelCase__ )

    def _UpperCamelCase( self : Any , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : bool = True , ):
        a__ : int = sample_prev + sigma_prev * model_output
        a__ : Any = (sample_prev - pred_original_sample) / sigma_prev
        a__ : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=lowerCamelCase__ , derivative=lowerCamelCase__ , pred_original_sample=lowerCamelCase__ )

    def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int ):
        raise NotImplementedError()
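# A standalone sketch of the sigma schedule built by the set_timesteps method
# above, using its config defaults (sigma_min=0.02, sigma_max=100): the values
# interpolate geometrically between sigma_min**2 and sigma_max**2 over the steps.
import numpy as np

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 5
timesteps = np.arange(0, num_inference_steps)[::-1]
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
    for i in timesteps
]
print(schedule)  # [0.0004, ~0.028, 2.0, ~141.4, 10000.0]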
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def UpperCamelCase_ ( ) -> int:
    a__ : Any = HfArgumentParser(__a )
    a__ : Any = parser.parse_args_into_dataclasses()[0]
    a__ : Optional[int] = TensorFlowBenchmark(args=__a )
    try:
        a__ : Optional[int] = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] )
        a__ : str = ""
        a__ : List[Any] = eval(str(__a ).split(" " )[-1] )
        a__ : List[str] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(__a )
        if len(__a ) > 0:
            a__ : Tuple = full_error_msg + begin_error_msg + str(__a )
            raise ValueError(__a )

    benchmark.run()


if __name__ == "__main__":
    main()
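# A hedged programmatic sketch of the same runner: TensorFlowBenchmarkArguments
# accepts model names plus batch sizes and sequence lengths (the checkpoint
# name here is illustrative only).
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
benchmark = TensorFlowBenchmark(args=args)
results = benchmark.run()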
from typing import Any


class A__ :
    """simple docstring"""

    def __init__( self : Union[str, Any] , lowerCamelCase__ : Any ):
        a__ : List[str] = data
        a__ : List[Any] = None

    def __repr__( self : Tuple ):
        return f'''Node({self.data})'''


class A__ :
    """simple docstring"""

    def __init__( self : Dict ):
        a__ : Union[str, Any] = None

    def __iter__( self : List[Any] ):
        a__ : List[str] = self.head
        while node:
            yield node.data
            a__ : List[Any] = node.next

    def __len__( self : Optional[Any] ):
        return sum(1 for _ in self )

    def __repr__( self : List[Any] ):
        return "->".join([str(lowerCamelCase__ ) for item in self] )

    def __getitem__( self : Dict , lowerCamelCase__ : int ):
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None

    def __setitem__( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        a__ : int = self.head
        for _ in range(lowerCamelCase__ ):
            a__ : Union[str, Any] = current.next
        a__ : Optional[int] = data

    def _UpperCamelCase( self : Any , lowerCamelCase__ : Any ):
        self.insert_nth(len(self ) , lowerCamelCase__ )

    def _UpperCamelCase( self : str , lowerCamelCase__ : Any ):
        self.insert_nth(0 , lowerCamelCase__ )

    def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
        if not 0 <= index <= len(self ):
            raise IndexError("list index out of range" )
        a__ : Union[str, Any] = Node(lowerCamelCase__ )
        if self.head is None:
            a__ : List[Any] = new_node
        elif index == 0:
            a__ : str = self.head  # link new_node to head
            a__ : int = new_node
        else:
            a__ : List[Any] = self.head
            for _ in range(index - 1 ):
                a__ : Optional[int] = temp.next
            a__ : Optional[int] = temp.next
            a__ : Dict = new_node

    def _UpperCamelCase( self : List[Any] ):  # print every node data
        print(self )

    def _UpperCamelCase( self : List[str] ):
        return self.delete_nth(0 )

    def _UpperCamelCase( self : Dict ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )

    def _UpperCamelCase( self : Any , lowerCamelCase__ : int = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError("List index out of range." )
        a__ : Optional[int] = self.head  # default first node
        if index == 0:
            a__ : Tuple = self.head.next
        else:
            a__ : List[Any] = self.head
            for _ in range(index - 1 ):
                a__ : int = temp.next
            a__ : Tuple = temp.next
            a__ : int = temp.next.next
        return delete_node.data

    def _UpperCamelCase( self : Optional[int] ):
        return self.head is None

    def _UpperCamelCase( self : Optional[int] ):
        a__ : Any = None
        a__ : List[Any] = self.head

        while current:
            # Store the current node's next node.
            a__ : Tuple = current.next
            # Make the current node's next point backwards
            a__ : int = prev
            # Make the previous node be the current node
            a__ : Optional[Any] = current
            # Make the current node the next node (to progress iteration)
            a__ : Tuple = next_node
        # Return prev in order to put the head at the end
        a__ : Optional[Any] = prev


def UpperCamelCase_ ( ) -> None:
    a__ : Tuple = LinkedList()
    assert linked_list.is_empty() is True
    assert str(__a ) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10 ):
        assert len(__a ) == i
        linked_list.insert_nth(__a , i + 1 )

    assert str(__a ) == "->".join(str(__a ) for i in range(1 , 11 ) )

    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(__a ) == "->".join(str(__a ) for i in range(0 , 12 ) )

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(__a ) == 9
    assert str(__a ) == "->".join(str(__a ) for i in range(1 , 10 ) )

    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True

    for i in range(0 , 9 ):
        a__ : int = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True

    linked_list.reverse()
    assert str(__a ) == "->".join(str(__a ) for i in range(-8 , 1 ) )


def UpperCamelCase_ ( ) -> None:
    a__ : Union[str, Any] = [
        -9,
        100,
        Node(77_345_112 ),
        "dlrow olleH",
        7,
        5_555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    a__ : Any = LinkedList()

    for i in test_input:
        linked_list.insert_tail(__a )

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(__a ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    a__ : int = linked_list.delete_head()
    assert result == -9
    assert (
        str(__a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    a__ : List[Any] = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(__a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    a__ : List[str] = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(__a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!" ) )
    assert (
        str(__a ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(__a )
    assert (
        str(__a ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(__a ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def UpperCamelCase_ ( ) -> Tuple:
    from doctest import testmod

    testmod()
    a__ : Any = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head " ).strip() )
    linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
    linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nDelete head" )
    linked_list.delete_head()
    print("Delete tail" )
    linked_list.delete_tail()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nReverse linked list" )
    linked_list.reverse()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nString representation of linked list:" )
    print(__a )
    print("\nReading/changing Node data using indexing:" )
    print(f'''Element at Position 1: {linked_list[1]}''' )
    a__ : Optional[int] = input("Enter New Value: " ).strip()
    print("New list:" )
    print(__a )
    print(f'''length of linked_list is : {len(__a )}''' )


if __name__ == "__main__":
    main()
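# A short usage sketch of the list above, using the `LinkedList` / `Node`
# names that the test helpers themselves reference:
lst = LinkedList()
for value in (1, 2, 3):
    lst.insert_tail(value)
lst.insert_head(0)
print(lst)                # 0->1->2->3
print(lst.delete_nth(2))  # 2
lst.reverse()
print(lst, len(lst))      # 3->1->0 3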
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
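A usage sketch for the evaluation script above. The flags are the argparse options defined in get_args; the file name eval_rag.py and the checkpoint id are assumptions (facebook/rag-sequence-nq is one public RAG checkpoint):

# python eval_rag.py \
#     --model_name_or_path facebook/rag-sequence-nq \
#     --model_type rag_sequence \
#     --evaluation_set questions.txt \
#     --gold_data_path gold.tsv \
#     --gold_data_mode qa \
#     --predictions_path predictions.txt \
#     --eval_mode e2e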
def solution() -> int:
    """Champernowne constant digit product (Project Euler problem 40)."""
    constant = []
    i = 1
    # Appending the first 10**6 integers yields comfortably more than 10**6 digits.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )


if __name__ == "__main__":
    print(solution())
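A minimal sanity check of the digit indexing used above (the indices were verified by hand):

digits = "".join(str(n) for n in range(1, 1_000))
assert digits[0] == "1"   # d1 of 0.123456789101112...
assert digits[9] == "1"   # d10 (first digit of "10")
assert digits[99] == "5"  # d100 (first digit of "55")
# Extending to all seven indices gives 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210,
# the known answer to Project Euler problem 40.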
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
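A behavior sketch of the compute_metrics closure above: argmax over the logits, then mean accuracy. The toy arrays are illustrative:

import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2]])
preds = np.argmax(logits, axis=1)            # -> array([1, 0])
label_ids = np.array([1, 1])
print({"acc": (preds == label_ids).mean()})  # {'acc': 0.5}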
from sklearn.metrics import mean_squared_error import datasets UpperCamelCase : Any = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ UpperCamelCase : Optional[Any] = """\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. """ UpperCamelCase : Optional[Any] = """ Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. \"raw_values\" : Returns a full set of errors in case of multioutput input. \"uniform_average\" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric(\"mse\") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {'mse': 0.6123724356957945} If you're using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mse': array([0.41666667, 1. 
])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): """simple docstring""" def _UpperCamelCase( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _UpperCamelCase( self : Optional[int] ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def _UpperCamelCase( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Dict="uniform_average" , lowerCamelCase__ : Tuple=True ): a__ : int = mean_squared_error( lowerCamelCase__ , lowerCamelCase__ , sample_weight=lowerCamelCase__ , multioutput=lowerCamelCase__ , squared=lowerCamelCase__ ) return {"mse": mse}
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def UpperCamelCase_ ( ) -> Optional[int]: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join a__ : Dict = "__test_patch_submodule_mock__" with patch_submodule(_test_patching , "os.path.join" , __a ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def UpperCamelCase_ ( ) -> Optional[Any]: assert _test_patching.open is open a__ : Dict = "__test_patch_submodule_builtin_mock__" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , "open" , __a ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def UpperCamelCase_ ( ) -> Tuple: # pandas.read_csv is not present in _test_patching a__ : List[str] = "__test_patch_submodule_missing_mock__" with patch_submodule(_test_patching , "pandas.read_csv" , __a ): pass def UpperCamelCase_ ( ) -> int: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point a__ : List[str] = "__test_patch_submodule_missing_builtin_mock__" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , "len" , __a ) is None with patch_submodule(_test_patching , "len" , __a ): assert _test_patching.len is mock assert _test_patching.len is len def UpperCamelCase_ ( ) -> List[str]: a__ : Tuple = 
"__test_patch_submodule_start_and_stop_mock__" a__ : Dict = patch_submodule(_test_patching , "open" , __a ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def UpperCamelCase_ ( ) -> Union[str, Any]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join a__ : List[Any] = "__test_patch_submodule_successive_join__" a__ : Tuple = "__test_patch_submodule_successive_dirname__" a__ : Dict = "__test_patch_submodule_successive_rename__" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , "os.path.join" , __a ): with patch_submodule(_test_patching , "os.rename" , __a ): with patch_submodule(_test_patching , "os.path.dirname" , __a ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , "os.rename" , __a ): with patch_submodule(_test_patching , "os.path.join" , __a ): with patch_submodule(_test_patching , "os.path.dirname" , __a ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def UpperCamelCase_ ( ) -> List[str]: a__ : Tuple = "__test_patch_submodule_doesnt_exist_mock__" with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , __a ): pass with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , __a ): pass
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""", # See all XGLM models at https://huggingface.co/models?filter=xglm } class A__ ( A__ ): """simple docstring""" _lowercase = 'xglm' _lowercase = ['past_key_values'] _lowercase = { 'num_attention_heads': 'attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'num_layers', } def __init__( self : List[str] , lowerCamelCase__ : Optional[Any]=256_008 , lowerCamelCase__ : Dict=2_048 , lowerCamelCase__ : Optional[int]=1_024 , lowerCamelCase__ : Any=4_096 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Tuple=16 , lowerCamelCase__ : str="gelu" , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : List[str]=0.0 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Dict=1 , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : int=2 , **lowerCamelCase__ : Optional[int] , ): a__ : int = vocab_size a__ : int = max_position_embeddings a__ : Union[str, Any] = d_model a__ : List[Any] = ffn_dim a__ : Optional[int] = num_layers a__ : int = attention_heads a__ : Any = activation_function a__ : Union[str, Any] = dropout a__ : List[Any] = attention_dropout a__ : Union[str, Any] = activation_dropout a__ : Union[str, Any] = layerdrop a__ : Any = init_std a__ : int = scale_embedding # scale factor will be sqrt(d_model) if True a__ : List[str] = use_cache super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
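A minimal usage sketch, assuming the class above is transformers' XGLMConfig (its model_type and checkpoint URL both say xglm); the printed values are the defaults set in __init__:

from transformers import XGLMConfig

config = XGLMConfig()  # defaults mirror facebook/xglm-564M
print(config.vocab_size, config.d_model, config.num_layers, config.attention_heads)
# 256008 1024 24 16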
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
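A quick usage check of the helper above:

print(binary_xor(25, 32))  # 0b111001  (25 = 0b11001, 32 = 0b100000, 25 ^ 32 = 57)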
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
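A usage sketch for the compression filesystems above, assuming they have been registered with fsspec under their protocol attributes (the surrounding library registers them on import); the chained-URL syntax is the one quoted in the protocol comment, and the URL itself is illustrative:

import fsspec

with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", "rb") as f:
    data = f.read()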
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def UpperCamelCase_ ( __a ) -> Dict: if is_torch_version("<" , "2.0.0" ) or not hasattr(__a , "_dynamo" ): return False return isinstance(__a , torch._dynamo.eval_frame.OptimizedModule ) def UpperCamelCase_ ( __a , __a = True ) -> Tuple: a__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) a__ : List[Any] = is_compiled_module(__a ) if is_compiled: a__ : Optional[int] = model a__ : List[str] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(__a , __a ): a__ : int = model.module if not keep_fpaa_wrapper: a__ : Union[str, Any] = getattr(__a , "forward" ) a__ : Union[str, Any] = model.__dict__.pop("_original_forward" , __a ) if original_forward is not None: while hasattr(__a , "__wrapped__" ): a__ : int = forward.__wrapped__ if forward == original_forward: break a__ : Any = forward if getattr(__a , "_converted_to_transformer_engine" , __a ): convert_model(__a , to_transformer_engine=__a ) if is_compiled: a__ : List[str] = model a__ : Optional[int] = compiled_model return model def UpperCamelCase_ ( ) -> int: PartialState().wait_for_everyone() def UpperCamelCase_ ( __a , __a ) -> int: if PartialState().distributed_type == DistributedType.TPU: xm.save(__a , __a ) elif PartialState().local_process_index == 0: torch.save(__a , __a ) @contextmanager def UpperCamelCase_ ( **__a ) -> Optional[int]: for key, value in kwargs.items(): a__ : int = str(__a ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def UpperCamelCase_ ( __a ) -> Dict: if not hasattr(__a , "__qualname__" ) and not hasattr(__a , "__name__" ): a__ : Union[str, Any] = getattr(__a , "__class__" , __a ) if hasattr(__a , "__qualname__" ): return obj.__qualname__ if hasattr(__a , "__name__" ): return obj.__name__ return str(__a ) def UpperCamelCase_ ( __a , __a ) -> str: for key, value in source.items(): if isinstance(__a , __a ): a__ : Any = destination.setdefault(__a , {} ) merge_dicts(__a , __a ) else: a__ : List[str] = value return destination def UpperCamelCase_ ( __a = None ) -> bool: if port is None: a__ : int = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
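A behavior sketch of the recursive dictionary merge defined above (named merge_dicts in accelerate, an assumption based on its signature); the dictionaries are illustrative:

source = {"a": {"b": 1}, "c": 2}
destination = {"a": {"d": 3}}
merge_dicts(source, destination)
print(destination)  # {'a': {'d': 3, 'b': 1}, 'c': 2}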
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
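A hand trace of the name mapping performed by to_tf_var_name above:

# PyTorch parameter: encoder.layer.0.attention.self.query.weight
#   "layer." -> "layer_", "." -> "/", "weight" -> "kernel", then the "bert/" prefix:
#   bert/encoder/layer_0/attention/self/query/kernel
# Because "attention.self.query" appears in tensors_to_transpose, the value is also transposed.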
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) UpperCamelCase : Tuple = { """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class A__ ( A__ ): """simple docstring""" _lowercase = 'donut-swin' _lowercase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , lowerCamelCase__ : Optional[Any]=224 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : str=3 , lowerCamelCase__ : Union[str, Any]=96 , lowerCamelCase__ : Optional[int]=[2, 2, 6, 2] , lowerCamelCase__ : str=[3, 6, 12, 24] , lowerCamelCase__ : List[str]=7 , lowerCamelCase__ : int=4.0 , lowerCamelCase__ : Any=True , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Optional[int]=0.02 , lowerCamelCase__ : Dict=1E-5 , **lowerCamelCase__ : Optional[int] , ): super().__init__(**lowerCamelCase__ ) a__ : Tuple = image_size a__ : str = patch_size a__ : Tuple = num_channels a__ : Any = embed_dim a__ : int = depths a__ : List[str] = len(lowerCamelCase__ ) a__ : Optional[Any] = num_heads a__ : Dict = window_size a__ : Optional[int] = mlp_ratio a__ : List[Any] = qkv_bias a__ : Tuple = hidden_dropout_prob a__ : int = attention_probs_dropout_prob a__ : Union[str, Any] = drop_path_rate a__ : List[str] = hidden_act a__ : Optional[Any] = use_absolute_embeddings a__ : Union[str, Any] = layer_norm_eps a__ : Optional[int] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a__ : Any = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
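# A brief usage sketch for the config above, assuming a transformers version
# that exports `DonutSwinConfig` at the top level: with the defaults, the
# derived `hidden_size` is embed_dim * 2 ** (num_stages - 1), which is what
# lets the Swin backbone plug into VisionEncoderDecoderModel.
from transformers import DonutSwinConfig

config = DonutSwinConfig()  # embed_dim=96, depths=[2, 2, 6, 2] -> 4 stages
assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)  # 768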
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
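# A hedged inference sketch mirroring the integration test above: load the
# audioset-finetuned AST checkpoint, featurize a waveform, and read the top
# class. `waveform` is a placeholder (one second of silence), not the test's
# real audio clip, so the predicted label is not meaningful here.
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

ckpt = "MIT/ast-finetuned-audioset-10-10-0.4593"
extractor = ASTFeatureExtractor.from_pretrained(ckpt)
model = ASTForAudioClassification.from_pretrained(ckpt)

waveform = torch.zeros(16_000).numpy()  # placeholder: 1 s of silence at 16 kHz
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 527), one logit per AudioSet class
print(model.config.id2label[int(logits.argmax(-1))])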
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase : str = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCamelCase : Any = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def UpperCamelCase_ ( ) -> Dict: a__ : Any = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) a__ : Tuple = bs[:] a__ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(__a ) cs.append(2**8 + n ) n += 1 a__ : str = [chr(__a ) for n in cs] return dict(zip(__a , __a ) ) def UpperCamelCase_ ( __a ) -> Dict: a__ : int = set() a__ : int = word[0] for char in word[1:]: pairs.add((prev_char, char) ) a__ : str = char return pairs class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Dict="<s>" , lowerCamelCase__ : str="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Optional[int]="<pad>" , lowerCamelCase__ : Dict="<mask>" , lowerCamelCase__ : Any=False , **lowerCamelCase__ : Optional[int] , ): a__ : Tuple = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token a__ : Tuple = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token a__ : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token a__ : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token a__ : Optional[int] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it a__ : Dict = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token super().__init__( errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , ) with open(lowerCamelCase__ , encoding="utf-8" ) as vocab_handle: a__ : int = json.load(lowerCamelCase__ ) a__ : Optional[int] = {v: k for k, v in self.encoder.items()} a__ : int = errors # how to handle errors in decoding a__ : Union[str, Any] = bytes_to_unicode() a__ : int = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase__ , encoding="utf-8" ) as merges_handle: a__ : str = merges_handle.read().split("\n" )[1:-1] a__ : Dict = [tuple(merge.split() ) for merge in bpe_merges] a__ : List[str] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[int] = {} a__ : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions a__ : List[str] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def _UpperCamelCase( self : Union[str, Any] ): return len(self.encoder ) def _UpperCamelCase( self : int ): return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCamelCase( self : Any , lowerCamelCase__ : Dict ): if token in self.cache: return self.cache[token] a__ : Optional[Any] = tuple(lowerCamelCase__ ) a__ : Optional[int] = get_pairs(lowerCamelCase__ ) if not pairs: return token while True: a__ : List[str] = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break a__, a__ : Union[str, Any] = bigram a__ : Any = [] a__ : Union[str, Any] = 0 while i < len(lowerCamelCase__ ): try: a__ : List[str] = word.index(lowerCamelCase__ , lowerCamelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) a__ : Optional[int] = j if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 a__ : Dict = tuple(lowerCamelCase__ ) a__ : int = new_word if len(lowerCamelCase__ ) == 1: break else: a__ : Optional[int] = get_pairs(lowerCamelCase__ ) a__ : List[str] = " ".join(lowerCamelCase__ ) a__ : List[Any] = word return word def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Tuple ): a__ : Optional[int] = [] for token in re.findall(self.pat , lowerCamelCase__ ): a__ : str = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(" " ) ) return bpe_tokens def _UpperCamelCase( self : str , lowerCamelCase__ : Union[str, Any] ): return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] ): return self.decoder.get(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Dict ): a__ : List[Any] = "".join(lowerCamelCase__ ) a__ : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) 
return text def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): if not os.path.isdir(lowerCamelCase__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return a__ : str = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) a__ : Optional[int] = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + "\n" ) a__ : int = 0 with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) a__ : List[str] = token_index writer.write(" ".join(lowerCamelCase__ ) + "\n" ) index += 1 return vocab_file, merge_file def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a__ : Any = [self.cls_token_id] a__ : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1] def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : int = [self.sep_token_id] a__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any=False , **lowerCamelCase__ : Union[str, Any] ): a__ : int = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()): a__ : Any = " " + text return (text, kwargs)
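# A brief usage sketch for the byte-level BPE tokenizer defined above, using the
# facebook/bart-base vocab. It shows the GPT-2-style quirk the class inherits:
# a word encodes differently with and without a preceding space ("Ġ" marks a
# leading space), which is why the `add_prefix_space` option exists.
from transformers import BartTokenizer

tok = BartTokenizer.from_pretrained("facebook/bart-base")
print(tok.tokenize("Hello world"))   # ['Hello', 'Ġworld']
print(tok.tokenize(" Hello world"))  # ['ĠHello', 'Ġworld']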
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
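# A hedged sketch of the encoding behavior the slow tests above pin down: the
# XGLM tokenizer prepends the </s> token (id 2) to every sequence. This needs
# network access to download the facebook/xglm-564M checkpoint.
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tok.encode("Hello World!")
assert ids[0] == 2  # sequences start with the </s> id, as asserted in the test
print(ids)  # expected [2, 31227, 4447, 35] per the integration test above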
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") UpperCamelCase : str = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field( default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) _lowercase = field( default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , ) _lowercase = field( default=1_0_2_4 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) _lowercase = field( default=A__ , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) _lowercase = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _lowercase = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) _lowercase = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'A csv or a json file containing the training data.'} ) _lowercase = field( default=A__ , metadata={'help': 'A csv or a json file containing the validation data.'} ) _lowercase = field(default=A__ , metadata={'help': 'A csv or a json file containing the test data.'} ) def _UpperCamelCase( self : int ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: a__ : Dict = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." a__ : int = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class A__ : """simple docstring""" _lowercase = field( default=A__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) _lowercase = field( default=A__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) _lowercase = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _lowercase = field( default=A__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) def UpperCamelCase_ ( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a__, a__, a__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a__, a__, a__ : Optional[Any] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) a__ : Tuple = training_args.get_process_log_level() logger.setLevel(__a ) datasets.utils.logging.set_verbosity(__a ) transformers.utils.logging.set_verbosity(__a ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. a__ : Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a__ : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. a__ : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. a__ : str = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: a__ : Any = data_args.train_file.split("." )[-1] a__ : Dict = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." a__ : Tuple = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(f'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files a__ : List[str] = load_dataset("csv" , data_files=__a , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files a__ : Tuple = load_dataset("json" , data_files=__a , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels a__ : Dict = raw_datasets["train"].features["label"].names a__ : Optional[Any] = len(__a ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a__ : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer a__ : Union[str, Any] = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__a , ) a__ : Any = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: a__ : Optional[Any] = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch a__ : Optional[Any] = False # Some models have set the order of the labels to use, so let's make sure we do use it. 
a__ : Dict = {"Refused": 0, "Entailed": 1} a__ : List[Any] = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) a__ : Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__a ): # Tokenize the texts def _convert_table_text_to_pandas(__a ): a__ : List[str] = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] a__ : List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd a__ : str = examples["statement"] a__ : Tuple = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) a__ : str = tokenizer(__a , __a , padding=__a , max_length=__a , truncation=__a ) a__ : Tuple = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): a__ : str = raw_datasets.map( __a , batched=__a , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) a__ : Optional[Any] = raw_datasets["train"] if data_args.max_train_samples is not None: a__ : Optional[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) a__ : Dict = raw_datasets["validation"] if data_args.max_eval_samples is not None: a__ : int = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) a__ : Dict = raw_datasets["test"] if data_args.max_predict_samples is not None: a__ : Union[str, Any] = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__a ) ) , 3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__a ): a__ : Dict = p.predictions[0] if isinstance(p.predictions , __a ) else p.predictions a__ : Optional[Any] = np.argmax(__a , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: a__ : Optional[int] = default_data_collator elif training_args.fpaa: a__ : int = DataCollatorWithPadding(__a , pad_to_multiple_of=8 ) else: a__ : Any = None # Initialize our Trainer a__ : Optional[Any] = Trainer( model=__a , args=__a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__a , tokenizer=__a , data_collator=__a , ) # Training if training_args.do_train: a__ : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: a__ : Dict = training_args.resume_from_checkpoint elif last_checkpoint is not None: a__ : Tuple = last_checkpoint a__ : Union[str, Any] = trainer.train(resume_from_checkpoint=__a ) a__ : int = train_result.metrics a__ : Union[str, Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__a ) ) a__ : Optional[int] = min(__a , len(__a ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , __a ) trainer.save_metrics("train" , __a ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Any = trainer.evaluate(eval_dataset=__a ) a__ : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__a ) a__ : Optional[int] = min(__a , len(__a ) ) trainer.log_metrics("eval" , __a ) trainer.save_metrics("eval" , __a ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. a__ : Optional[Any] = predict_dataset.remove_columns("label" ) a__ : List[str] = trainer.predict(__a , metric_key_prefix="predict" ).predictions a__ : Dict = np.argmax(__a , axis=1 ) a__ : Union[str, Any] = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(__a , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(__a ): a__ : Union[str, Any] = label_list[item] writer.write(f'''{index}\t{item}\n''' ) a__ : Any = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**__a ) else: trainer.create_model_card(**__a ) def UpperCamelCase_ ( __a ) -> int: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
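# A minimal, self-contained sketch of the `_convert_table_text_to_pandas`
# helper used in preprocessing above: TabFact serializes tables with
# '#'-separated columns and '\n'-separated rows, the first row being the header.
import pandas as pd


def convert_table_text_to_pandas(table_text: str) -> pd.DataFrame:
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])


table = "city#population\nparis#2.1m\nberlin#3.6m"
print(convert_table_text_to_pandas(table))
#      city population
# 0   paris       2.1m
# 1  berlin       3.6m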
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
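# A small sketch of the state-dict renaming mechanics used above (the helper's
# name is obfuscated to `UpperCamelCase_`, so `rename_key` is an assumption):
# pop the tensor under the old key and reinsert it under the new one.
import torch


def rename_key(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)


sd = {"visual_encoder.cls_token": torch.zeros(1, 1, 8)}
rename_key(sd, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
assert "vision_model.embeddings.class_embedding" in sd and len(sd) == 1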
from math import ceil


def solution(n: int = 1_001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral (n odd)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corners of ring i are odd**2, odd**2 - even, odd**2 - 2*even
        # and odd**2 - 3*even, which sum to 4 * odd**2 - 6 * even
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
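# A quick sanity check of the corner formula above under a known small case:
# the diagonals of a 5x5 number spiral are 1, 3, 5, 7, 9, 13, 17, 21, 25,
# which sum to 101.
assert solution(5) == 101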
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, updating right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
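# Cross-checking the Pascal's-row implementation above against the standard
# library (math.comb is available from Python 3.8 onwards).
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252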
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase : List[str] = { """configuration_bridgetower""": [ """BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BridgeTowerConfig""", """BridgeTowerTextConfig""", """BridgeTowerVisionConfig""", ], """processing_bridgetower""": ["""BridgeTowerProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = ["""BridgeTowerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = [ """BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""", """BridgeTowerForContrastiveLearning""", """BridgeTowerForImageAndTextRetrieval""", """BridgeTowerForMaskedLM""", """BridgeTowerModel""", """BridgeTowerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
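# A hedged illustration of what the `_LazyModule` indirection above buys:
# importing the package is cheap, and the heavy, torch-backed submodules are
# only materialized on first attribute access. This assumes torch and vision
# extras are installed, otherwise the guarded names are simply absent.
import importlib

bridgetower = importlib.import_module("transformers.models.bridgetower")
config_cls = bridgetower.BridgeTowerConfig  # first access triggers the real import
print(config_cls.__name__)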
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
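# A minimal usage sketch for the `_pad` override above, assuming this file defines
# `LEDTokenizerFast` (the checkpoint name is taken from the URL map at the top of the file):
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
encoded = tokenizer("Global attention goes on the first token.")
encoded["global_attention_mask"] = [1] + [0] * (len(encoded["input_ids"]) - 1)
# `_pad` extends `global_attention_mask` with -1 (meaning local attention), not 0:
padded = tokenizer.pad(encoded, padding="max_length", max_length=32)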
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""", datefmt="""%Y-%m-%d %H:%M:%S""", level=os.environ.get("""LOGLEVEL""", """INFO""").upper(), stream=sys.stdout, ) UpperCamelCase : List[str] = logging.getLogger(__name__) UpperCamelCase : Optional[int] = {"""facebook/bart-base""": BartForConditionalGeneration} UpperCamelCase : Optional[int] = {"""facebook/bart-base""": BartTokenizer} def UpperCamelCase_ ( ) -> Optional[Any]: a__ : Optional[Any] = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" , type=__a , default=__a , help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" , type=__a , default=5 , help="The maximum total input sequence length after tokenization." , ) parser.add_argument( "--num_beams" , type=__a , default=__a , help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) , ) parser.add_argument( "--model_name_or_path" , type=__a , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__a , ) parser.add_argument( "--config_name" , type=__a , default=__a , help="Pretrained config name or path if not the same as model_name" , ) parser.add_argument( "--device" , type=__a , default="cpu" , help="Device where the model will be run" , ) parser.add_argument("--output_file_path" , type=__a , default=__a , help="Where to store the final ONNX file." ) a__ : str = parser.parse_args() return args def UpperCamelCase_ ( __a , __a="cpu" ) -> List[Any]: a__ : Any = model_dict[model_name].from_pretrained(__a ).to(__a ) a__ : List[str] = tokenizer_dict[model_name].from_pretrained(__a ) if model_name in ["facebook/bart-base"]: a__ : Optional[int] = 0 a__ : Union[str, Any] = None a__ : Union[str, Any] = 0 return huggingface_model, tokenizer def UpperCamelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]: model.eval() a__ : int = None a__ : str = torch.jit.script(BARTBeamSearchGenerator(__a ) ) with torch.no_grad(): a__ : int = "My friends are cool but they eat too many carbs." 
a__ : str = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors="pt" ).to(model.device ) a__ : int = model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=__a , max_length=__a , early_stopping=__a , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( __a , ( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) , __a , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } , example_outputs=__a , ) logger.info("Model exported to {}".format(__a ) ) a__ : List[str] = remove_dup_initializers(os.path.abspath(__a ) ) logger.info("Deduplicated and optimized model written to {}".format(__a ) ) a__ : Dict = onnxruntime.InferenceSession(__a ) a__ : Tuple = ort_sess.run( __a , { "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(__a ), "max_length": np.array(__a ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def UpperCamelCase_ ( ) -> Tuple: a__ : int = parse_args() a__ : str = 5 a__ : str = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() a__ : Optional[int] = torch.device(args.device ) a__, a__ : int = load_model_tokenizer(args.model_name_or_path , __a ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(__a ) if args.max_length: a__ : Optional[int] = args.max_length if args.num_beams: a__ : int = args.num_beams if args.output_file_path: a__ : str = args.output_file_path else: a__ : Optional[Any] = "BART.onnx" logger.info("Exporting model to ONNX" ) export_and_validate_model(__a , __a , __a , __a , __a ) if __name__ == "__main__": main()
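# A hypothetical invocation of this export script (the flags come from `parse_args`
# above; the script filename is an assumption):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --device cpu \
#       --max_length 5 \
#       --num_beams 4 \
#       --output_file_path BART.onnx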
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
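# A minimal sketch of the `add_prefix_space` check enforced in `_batch_encode_plus`
# above, assuming this file defines `RobertaTokenizerFast`:
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
# pretokenized input is only accepted when add_prefix_space=True:
encoded = tokenizer(["Hello", "world"], is_split_into_words=True)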
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) UpperCamelCase : Union[str, Any] = getLogger(__name__) def UpperCamelCase_ ( __a , __a , __a , __a = 8 , __a = 1_024 , __a="val" , __a=None , __a=False , __a="summarization" , __a=None , __a=1 , __a = None , __a="" , **__a , ) -> Dict: a__ : List[str] = str(__a ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__a ) a__ : Tuple = Path(__a ) a__ : Tuple = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(__a ) a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(__a ).cuda() if fpaa: a__ : List[str] = model.half() # determine if we need to increase num_beams use_task_specific_params(__a , __a ) # update config with task specific params a__ : Optional[int] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: a__ : Tuple = num_return_sequences a__ : int = AutoTokenizer.from_pretrained(__a ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: a__ : Optional[int] = tokenizer.model_max_length if prefix is None: a__ : List[str] = prefix or getattr(model.config , "prefix" , "" ) or "" a__ : Tuple = SeqaSeqDataset( __a , __a , __a , max_target_length=1_024 , type_path=__a , n_obs=__a , prefix=__a , **__a , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. a__ : Any = ds.make_sortish_sampler(__a , distributed=__a , add_extra_examples=__a , shuffle=__a ) a__ : List[str] = DataLoader(__a , sampler=__a , batch_size=__a , collate_fn=ds.collate_fn ) a__ : List[str] = [] for batch in tqdm(__a ): a__ : List[str] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__a , num_beams=__a , **__a , ) a__ : int = tokenizer.batch_decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) a__ : Optional[int] = batch["ids"] if num_return_sequences > 1: a__ : Any = chunks(__a , __a ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__a ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__a , __a ) return results, sampler.num_replicas def UpperCamelCase_ ( ) -> List[str]: a__ : Any = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__a , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__a , help="like facebook/bart-large-cnn,t5-base, etc." 
, default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__a , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__a , default=__a ) parser.add_argument( "--type_path" , type=__a , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__a , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__a , default=8 , required=__a , help="batch size" ) parser.add_argument( "--local_rank" , type=__a , default=-1 , required=__a , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__a , default=__a , required=__a , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__a , default=1 , required=__a , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__a , default=600 , required=__a , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=__a , default=__a , required=__a ) parser.add_argument("--tgt_lang" , type=__a , default=__a , required=__a ) parser.add_argument( "--prefix" , type=__a , required=__a , default=__a , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) a__ : Tuple = time.time() a__, a__ : str = parser.parse_known_args() a__ : Any = parse_numeric_n_bool_cl_kwargs(__a ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) a__ : int = Path(args.save_dir + "_tmp" ) Path(__a ).mkdir(exist_ok=__a ) # this handles locking. a__ : Any = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
a__ : Dict = {} if args.src_lang is not None: a__ : Any = args.src_lang if args.tgt_lang is not None: a__ : Tuple = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__a ) a__, a__ : int = eval_data_dir( args.data_dir , __a , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__a , **__a , ) if args.local_rank <= 0: a__ : Tuple = Path(args.save_dir ) save_dir.mkdir(exist_ok=__a ) a__ : Optional[int] = gather_results_from_each_node(__a , __a , args.sync_timeout ) a__ : Tuple = combine_partial_results(__a ) if args.num_return_sequences > 1: a__ : Dict = save_dir.joinpath("pseudolabel_results.json" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(__a , __a ) return a__ : str = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__a ) as f: a__ : Union[str, Any] = [x.rstrip() for x in f.readlines()][: len(__a )] # Calculate metrics, save metrics, and save _generations.txt a__ : Dict = "translation" in args.task a__ : str = calculate_bleu if calc_bleu else calculate_rouge a__ : str = "bleu" if calc_bleu else "rouge" a__ : Dict = score_fn(__a , __a ) a__ : List[str] = len(__a ) a__ : Any = time.time() - start_time a__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 ) a__ : int = num_replicas # TODO(@stas00): add whatever metadata to metrics a__ : Dict = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(__a , __a , indent=__a ) print(__a ) write_txt_file(__a , save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(__a , save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(__a ) def UpperCamelCase_ ( __a ) -> List: a__ : List[str] = [] for partial_result in partial_results: records.extend(__a ) a__ : List[str] = sorted(__a , key=lambda __a : x["id"] ) a__ : Union[str, Any] = [x["pred"] for x in records] return preds def UpperCamelCase_ ( __a , __a , __a ) -> List[Dict[str, List]]: # WAIT FOR lots of .json files a__ : List[Any] = time.time() logger.info("waiting for all nodes to finish" ) a__ : Dict = None while (time.time() - start_wait) < timeout: a__ : Optional[Any] = list(save_dir.glob("rank_*.json" ) ) if len(__a ) < num_replicas: continue try: # make sure all json files are fully saved a__ : Union[str, Any] = lmap(__a , __a ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
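# A hypothetical launch of this distributed eval script (the flags come from
# `run_generate`'s argparse block above; `--local_rank` is supplied by the launcher,
# and the module path is an assumption):
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir cnn_dm \
#       --save_dir tmp_gen \
#       --bs 8 \
#       --fp16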
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
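# A quick worked example for the two transforms above (numbers are illustrative):
if __name__ == "__main__":
    data = [2.0, 4.0, 6.0, 8.0, 10.0]
    print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(data))  # zero mean, unit sample standard deviation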
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Tuple = {"""vocab_file""": """vocab.txt"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } UpperCamelCase : int = { """openbmb/cpm-ant-10b""": 1024, } def UpperCamelCase_ ( __a ) -> Tuple: a__ : List[str] = collections.OrderedDict() with open(__a , "r" , encoding="utf-8" ) as reader: a__ : Optional[int] = reader.readlines() for index, token in enumerate(__a ): a__ : List[str] = token.rstrip("\n" ) a__ : Tuple = index return vocab class A__ ( A__ ): """simple docstring""" def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Any=200 ): a__ : Tuple = vocab a__ : Tuple = unk_token a__ : List[Any] = max_input_chars_per_word def _UpperCamelCase( self : int , lowerCamelCase__ : List[str] ): a__ : List[str] = list(lowerCamelCase__ ) if len(lowerCamelCase__ ) > self.max_input_chars_per_word: return [self.unk_token] a__ : Optional[int] = 0 a__ : Tuple = [] while start < len(lowerCamelCase__ ): a__ : Union[str, Any] = len(lowerCamelCase__ ) a__ : List[str] = None while start < end: a__ : int = "".join(chars[start:end] ) if substr in self.vocab: a__ : Optional[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowerCamelCase__ ) a__ : List[Any] = end return sub_tokens class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = False def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]="<d>" , lowerCamelCase__ : List[Any]="</d>" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : List[Any]="<pad>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : int="</n>" , lowerCamelCase__ : Optional[Any]="</_>" , lowerCamelCase__ : Any="left" , **lowerCamelCase__ : Optional[Any] , ): requires_backends(self , ["jieba"] ) super().__init__( bod_token=lowerCamelCase__ , eod_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , line_token=lowerCamelCase__ , space_token=lowerCamelCase__ , padding_side=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : Union[str, Any] = bod_token a__ : Optional[Any] = eod_token a__ : Any = load_vocab(lowerCamelCase__ ) a__ : Dict = self.encoder[space_token] a__ : List[str] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] a__ : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) ) a__ : Union[str, Any] = {v: k for k, v in self.encoder.items()} a__ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _UpperCamelCase( self : List[str] ): return self.encoder[self.bod_token] @property def _UpperCamelCase( self : Union[str, Any] ): return self.encoder[self.eod_token] @property def _UpperCamelCase( self : Tuple ): return 
self.encoder["\n"] @property def _UpperCamelCase( self : Optional[int] ): return len(self.encoder ) def _UpperCamelCase( self : Dict ): return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[Any] ): a__ : Optional[Any] = [] for x in jieba.cut(lowerCamelCase__ , cut_all=lowerCamelCase__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase__ ) ) return output_tokens def _UpperCamelCase( self : str , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Tuple ): a__ : Tuple = [i for i in token_ids if i >= 0] a__ : List[str] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[int] ): return token in self.encoder def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[str] ): return "".join(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str ): return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple ): return self.decoder.get(lowerCamelCase__ , self.unk_token ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): if os.path.isdir(lowerCamelCase__ ): a__ : Optional[int] = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: a__ : Optional[int] = (filename_prefix + "-" if filename_prefix else "") + save_directory a__ : Dict = 0 if " " in self.encoder: a__ : int = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: a__ : int = self.encoder["\n"] del self.encoder["\n"] a__ : Optional[int] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) a__ : List[Any] = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) return [1] + ([0] * len(lowerCamelCase__ ))
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
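# A brute-force cross-check sketch for small lengths, assuming `solution` counts rows
# that can be filled with tiles of a single length in {2, 3, 4} plus unit cells, with
# at least one tile used (a Project Euler 116-style count):
def brute_force(length: int) -> int:
    def ways(n: int, k: int) -> int:
        # tilings of n cells using unit cells and k-length tiles
        if n < 0:
            return 0
        if n <= 1:
            return 1
        return ways(n - 1, k) + ways(n - k, k)

    return sum(ways(length, k) - 1 for k in (2, 3, 4))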
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
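# A hypothetical invocation (the model choices and flags come from the argparse block
# above; the script filename is an assumption):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub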
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
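# A quick worked example for `build_menu`/`greedy` above, sorting by value-per-weight
# (names and numbers are illustrative):
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 70]
chosen, total_value = greedy(build_menu(food, value, weight), 60.0, Things.value_weight)
print(chosen, total_value)  # [Things(Burger, 80, 40)] 80.0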
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
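# A quick worked example for `text_justification` above:
print(text_justification("This is an example of text justification.", 16))
# -> ['This    is    an', 'example  of text', 'justification.  ']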
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
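# A minimal usage sketch, assuming these classes back the public `Dataset.from_sql` /
# `Dataset.to_sql` API in `datasets` (the table and database names are illustrative):
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect("demo.db")
ds.to_sql("my_table", con)  # dispatches to the writer class above
loaded = Dataset.from_sql("SELECT * FROM my_table", con)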
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class A__ ( A__ ): """simple docstring""" _lowercase = (PNDMScheduler,) _lowercase = (('num_inference_steps', 5_0),) def _UpperCamelCase( self : int , **lowerCamelCase__ : str ): a__ : Optional[int] = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCamelCase__ ) return config def _UpperCamelCase( self : str , lowerCamelCase__ : Any=0 , **lowerCamelCase__ : Tuple ): a__ : List[str] = dict(self.forward_default_kwargs ) a__ : Any = kwargs.pop("num_inference_steps" , lowerCamelCase__ ) a__ : Union[str, Any] = self.dummy_sample a__ : Optional[int] = 0.1 * sample a__ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a__ : List[Any] = self.get_scheduler_config(**lowerCamelCase__ ) a__ : str = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals a__ : Optional[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) a__ : Tuple = scheduler_class.from_pretrained(lowerCamelCase__ ) new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals a__ : Optional[Any] = dummy_past_residuals[:] a__ : int = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample a__ : Optional[Any] = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a__ : Optional[Any] = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample a__ : Dict = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _UpperCamelCase( self : Tuple ): pass def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[Any]=0 , **lowerCamelCase__ : List[Any] ): a__ : List[Any] = dict(self.forward_default_kwargs ) a__ : List[Any] = kwargs.pop("num_inference_steps" , lowerCamelCase__ ) a__ : int = self.dummy_sample a__ : List[str] = 0.1 * sample a__ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a__ : List[str] = self.get_scheduler_config() a__ : List[str] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals (must be after setting timesteps) a__ : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) a__ : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residual (must be after setting timesteps) a__ : Optional[Any] = dummy_past_residuals[:] a__ : List[str] = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample a__ : List[Any] = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not 
identical" a__ : Union[str, Any] = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample a__ : str = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _UpperCamelCase( self : int , **lowerCamelCase__ : Tuple ): a__ : Union[str, Any] = self.scheduler_classes[0] a__ : Dict = self.get_scheduler_config(**lowerCamelCase__ ) a__ : Optional[Any] = scheduler_class(**lowerCamelCase__ ) a__ : Any = 10 a__ : List[str] = self.dummy_model() a__ : Tuple = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase__ ) for i, t in enumerate(scheduler.prk_timesteps ): a__ : Tuple = model(lowerCamelCase__ , lowerCamelCase__ ) a__ : str = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): a__ : str = model(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample return sample def _UpperCamelCase( self : str ): a__ : Optional[Any] = dict(self.forward_default_kwargs ) a__ : Tuple = kwargs.pop("num_inference_steps" , lowerCamelCase__ ) for scheduler_class in self.scheduler_classes: a__ : int = self.get_scheduler_config() a__ : List[Any] = scheduler_class(**lowerCamelCase__ ) a__ : Dict = self.dummy_sample a__ : Tuple = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase__ , "set_timesteps" ): scheduler.set_timesteps(lowerCamelCase__ ) elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , "set_timesteps" ): a__ : str = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a__ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a__ : Dict = dummy_past_residuals[:] a__ : str = scheduler.step_prk(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample a__ : Union[str, Any] = scheduler.step_prk(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a__ : Dict = scheduler.step_plms(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample a__ : str = scheduler.step_plms(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _UpperCamelCase( self : Optional[int] ): for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCamelCase__ ) a__ : str = self.scheduler_classes[0] a__ : str = self.get_scheduler_config(steps_offset=1 ) a__ : Optional[Any] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def _UpperCamelCase( self : List[str] ): for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCamelCase__ ) 
def _UpperCamelCase( self : Tuple ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): for t in [1, 5, 10]: self.check_over_forward(time_step=lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 a__ : Optional[int] = 27 for scheduler_class in self.scheduler_classes: a__ : int = self.dummy_sample a__ : Optional[int] = 0.1 * sample a__ : str = self.get_scheduler_config() a__ : List[str] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): a__ : Dict = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample def _UpperCamelCase( self : Optional[Any] ): with self.assertRaises(lowerCamelCase__ ): a__ : Union[str, Any] = self.scheduler_classes[0] a__ : Optional[Any] = self.get_scheduler_config() a__ : Union[str, Any] = scheduler_class(**lowerCamelCase__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def _UpperCamelCase( self : Any ): a__ : Union[str, Any] = self.full_loop() a__ : str = torch.sum(torch.abs(lowerCamelCase__ ) ) a__ : Dict = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def _UpperCamelCase( self : Tuple ): a__ : Dict = self.full_loop(prediction_type="v_prediction" ) a__ : Optional[int] = torch.sum(torch.abs(lowerCamelCase__ ) ) a__ : int = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def _UpperCamelCase( self : Tuple ): # We specify different beta, so that the first alpha is 0.99 a__ : Tuple = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 ) a__ : Tuple = torch.sum(torch.abs(lowerCamelCase__ ) ) a__ : Tuple = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def _UpperCamelCase( self : List[Any] ): # We specify different beta, so that the first alpha is 0.99 a__ : List[str] = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 ) a__ : Optional[int] = torch.sum(torch.abs(lowerCamelCase__ ) ) a__ : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
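# A minimal sketch of the PRK/PLMS denoising loop the tests above exercise,
# assuming diffusers' PNDMScheduler API (set_timesteps, step_prk, step_plms,
# prk_timesteps, plms_timesteps); the lambda stands in for a real
# noise-prediction model.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
model = lambda x, t: torch.zeros_like(x)  # dummy epsilon predictor

for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up steps
    residual = model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:  # linear multistep steps
    residual = model(sample, t)
    sample = scheduler.step_plms(residual, t, sample).prev_sample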
37
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
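# A quick sanity check for the function above; the two dates are the known
# Gregorian Easter Sundays for those years.
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2021) == datetime(2021, 4, 4)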
37
1
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_path(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        text = "Hey!"
        agent_type = AgentText(text)

        self.assertEqual(text, agent_type.to_string())
        self.assertEqual(text, agent_type.to_raw())
        self.assertEqual(text, agent_type)
37
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
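# A standalone sketch of the adapter pattern the LoRALayer class above
# implements: a frozen base projection plus a trainable low-rank bottleneck,
# y = W x + B(A x). Dimensions and names here are illustrative.
import torch
import torch.nn as nn

base = nn.Linear(64, 64)
base.weight.requires_grad_(False)  # freeze the pretrained weight
rank = 16
adapter = nn.Sequential(
    nn.Linear(64, rank, bias=False),  # A: down-projection
    nn.Linear(rank, 64, bias=False),  # B: up-projection
)
nn.init.zeros_(adapter[1].weight)  # so the adapter starts as a no-op

x = torch.randn(2, 64)
y = base(x) + adapter(x)  # only the adapter accumulates gradients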
37
1
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestCodeQuality(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
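# A small illustration of what the first regex above flags: an open() call
# without an encoding (or binary-mode) argument matches, while one passing
# encoding= does not. The strings below are made-up examples.
import re

regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert regexp.search('with open("data.txt") as f: pass') is not None
assert regexp.search('with open("data.txt", encoding="utf-8") as f: pass') is None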
37
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
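# A minimal sketch of the segmentation inference pattern the tests above
# verify, using the same checkpoint name as the tests; "scene.png" is a
# placeholder path.
import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")

image = Image.open("scene.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# one (height, width) label map per image, resized to the requested size
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]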
37
1
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
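# Usage sketch for the in-place slowsort above. Slowsort is deliberately
# inefficient ("multiply and surrender"): its running time is not even
# polynomial, so it is only useful as a teaching example.
data = [5, 3, 8, 1, 2]
slowsort(data)
assert data == [1, 2, 3, 5, 8]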
37
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
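# A quick illustration of rename_key above: indexed PyTorch module names are
# joined with underscores so they line up with Flax parameter trees. The
# sample key is made up for illustration.
assert rename_key("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"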
37
1
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) def UpperCamelCase_ ( __a=None , __a=None ) -> List[str]: return field(default_factory=lambda: default , metadata=__a ) @dataclass class A__ : """simple docstring""" _lowercase = list_field( default=[] , metadata={ 'help': ( 'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version' ' of all available models' ) } , ) _lowercase = list_field( default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} ) _lowercase = list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , ) _lowercase = field( default=A__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , ) _lowercase = field( default=A__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , ) _lowercase = field( default=A__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} ) _lowercase = field(default=A__ , metadata={'help': 'Use FP16 to accelerate inference.'} ) _lowercase = field(default=A__ , metadata={'help': 'Benchmark training of model'} ) _lowercase = field(default=A__ , metadata={'help': 'Verbose memory tracing'} ) _lowercase = field( default=A__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , ) _lowercase = field( default=A__ , metadata={ 'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory' } , ) _lowercase = field(default=A__ , metadata={'help': 'Trace memory line by line'} ) _lowercase = field(default=A__ , metadata={'help': 'Save result to a CSV file'} ) _lowercase = field(default=A__ , metadata={'help': 'Save all print statements in a log file'} ) _lowercase = field(default=A__ , metadata={'help': 'Whether to print environment information'} ) _lowercase = field( default=A__ , metadata={ 'help': ( 'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use' ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled' ' for debugging / testing and on TPU.' 
) } , ) _lowercase = field( default=F'inference_time_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving time results to csv.'} , ) _lowercase = field( default=F'inference_memory_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , ) _lowercase = field( default=F'train_time_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , ) _lowercase = field( default=F'train_memory_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , ) _lowercase = field( default=F'env_info_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving environment information.'} , ) _lowercase = field( default=F'log_{round(time() )}.csv' , metadata={'help': 'Log filename used if print statements are saved in log.'} , ) _lowercase = field(default=3 , metadata={'help': 'Times an experiment will be run.'} ) _lowercase = field( default=A__ , metadata={ 'help': ( 'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain' ' model weights.' ) } , ) def _UpperCamelCase( self : Optional[int] ): warnings.warn( f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models." , lowerCamelCase__ , ) def _UpperCamelCase( self : Union[str, Any] ): return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def _UpperCamelCase( self : List[str] ): if len(self.models ) <= 0: raise ValueError( "Please make sure you provide at least one model name / model identifier, *e.g.* `--models" " bert-base-cased` or `args.models = ['bert-base-cased']." ) return self.models @property def _UpperCamelCase( self : Any ): if not self.multi_process: return False elif self.is_tpu: logger.info("Multiprocessing is currently not possible on TPU." ) return False else: return True
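# Why the list_field helper above wraps the default in default_factory:
# dataclasses reject mutable defaults outright, so a factory is required.
# Note that `lambda: default` hands back the same list object on every call,
# so instances share it; copy the list if per-instance mutation is needed.
# A minimal sketch of the same pattern, with illustrative names:
from dataclasses import dataclass, field

def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)

@dataclass
class Args:
    batch_sizes: list = list_field(default=[8], metadata={"help": "batch sizes to benchmark"})

print(Args().batch_sizes)  # [8]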
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)

    benchmark.run()


if __name__ == "__main__":
    main()
37
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : Any = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
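# A sketch of what the _LazyModule indirection above buys: importing the
# package is cheap, and heavy torch-backed symbols are only resolved on
# first attribute access (assuming a transformers version that ships xmod).
import importlib

xmod = importlib.import_module("transformers.models.xmod")
config_cls = xmod.XmodConfig  # attribute access triggers the lazy submodule import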
37
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
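# A small illustration of the metric_max_over_ground_truths pattern used by
# the scoring code above: each prediction is scored against every reference
# and the best score wins. exact_match below is a stand-in for the metrics
# imported from utils_rag.
def exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)

assert metric_max_over_ground_truths(exact_match, "Paris", ["paris", "Lyon"]) == 1.0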
37
1
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : List[Any] = { """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class A__ ( A__ ): """simple docstring""" _lowercase = 'umt5' _lowercase = ['past_key_values'] def __init__( self : Union[str, Any] , lowerCamelCase__ : List[Any]=250_112 , lowerCamelCase__ : str=512 , lowerCamelCase__ : Optional[Any]=64 , lowerCamelCase__ : List[Any]=1_024 , lowerCamelCase__ : int=8 , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[Any]=6 , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=128 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[Any]=1E-6 , lowerCamelCase__ : Union[str, Any]=1.0 , lowerCamelCase__ : Union[str, Any]="gated-gelu" , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any="T5Tokenizer" , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : List[Any]=1 , lowerCamelCase__ : List[str]=0 , **lowerCamelCase__ : Any , ): super().__init__( is_encoder_decoder=lowerCamelCase__ , tokenizer_class=lowerCamelCase__ , tie_word_embeddings=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : Dict = vocab_size a__ : List[Any] = d_model a__ : Optional[Any] = d_kv a__ : Optional[int] = d_ff a__ : List[Any] = num_layers a__ : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a__ : str = num_heads a__ : Optional[int] = relative_attention_num_buckets a__ : Any = relative_attention_max_distance a__ : int = dropout_rate a__ : Union[str, Any] = layer_norm_epsilon a__ : Optional[int] = initializer_factor a__ : Optional[int] = feed_forward_proj a__ : Tuple = use_cache a__ : List[Any] = self.feed_forward_proj.split("-" ) a__ : Optional[int] = act_info[-1] a__ : Union[str, Any] = act_info[0] == "gated" if len(lowerCamelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase__ ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) if feed_forward_proj == "gated-gelu": a__ : Optional[int] = "gelu_new" @property def _UpperCamelCase( self : int ): return self.d_model @property def _UpperCamelCase( self : int ): return self.num_heads @property def _UpperCamelCase( self : Dict ): return self.num_layers class A__ ( A__ ): """simple docstring""" @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: a__ : Tuple = "past_encoder_sequence + sequence" a__ : Dict = {0: "batch"} a__ : Union[str, Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: a__ : Tuple = {0: "batch", 1: "decoder_sequence"} a__ : List[str] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ , direction="inputs" ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def _UpperCamelCase( self : int ): return 13 @property def _UpperCamelCase( self : List[str] ): return 5E-4
37
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
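# Illustrative invocation (the script name and flag names are assumptions
# inferred from the dataclass help strings above; HfArgumentParser derives CLI
# flags from the dataclass field names):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./clf_output --do_train --do_eval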
37
1
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
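# Minimal sketch (illustrative, not part of the original file): dummy objects
# let the import succeed even when the optional backends are missing; using
# the object then raises an informative error from `requires_backends`.
try:
    MidiProcessor()
except ImportError as err:  # requires_backends raises ImportError with install instructions
    print(err)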
37
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
37
1
def all_chars_unique(input_str: str) -> bool:
    """Return True if every character in ``input_str`` occurs at most once.

    Treats an integer as a bitmap over Unicode code points.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code
        # point, the character has been seen before.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
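# Quick usage sketch of the bitmap check above (illustrative):
assert all_chars_unique("abcdef") is True
assert all_chars_unique("abca") is False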
37
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
37
1
from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP UpperCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase : str = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save(\"cat.png\") ``` """ def UpperCamelCase_ ( __a , __a , __a=8 ) -> List[str]: a__ : List[str] = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 a__ : Optional[Any] = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : MultilingualCLIP , lowerCamelCase__ : XLMRobertaTokenizer , lowerCamelCase__ : UNetaDConditionModel , lowerCamelCase__ : Union[DDIMScheduler, DDPMScheduler] , lowerCamelCase__ : VQModel , ): super().__init__() self.register_modules( text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , movq=lowerCamelCase__ , ) a__ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _UpperCamelCase( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ): if latents is None: a__ : Tuple = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) a__ : Optional[Any] = latents.to(lowerCamelCase__ ) a__ : Tuple = latents * scheduler.init_noise_sigma return latents def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=None , ): a__ : Union[str, Any] = len(lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else 1 # get prompt text embeddings a__ : List[Any] = self.tokenizer( lowerCamelCase__ , padding="max_length" , truncation=lowerCamelCase__ , max_length=77 , return_attention_mask=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors="pt" , ) a__ : Tuple = text_inputs.input_ids a__ : Union[str, Any] = self.tokenizer(lowerCamelCase__ , padding="longest" , return_tensors="pt" ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCamelCase__ , lowerCamelCase__ ): a__ : Optional[int] = 
self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) a__ : Optional[Any] = text_input_ids.to(lowerCamelCase__ ) a__ : List[Any] = text_inputs.attention_mask.to(lowerCamelCase__ ) a__, a__ : str = self.text_encoder( input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ ) a__ : Any = prompt_embeds.repeat_interleave(lowerCamelCase__ , dim=0 ) a__ : Optional[Any] = text_encoder_hidden_states.repeat_interleave(lowerCamelCase__ , dim=0 ) a__ : Optional[int] = text_mask.repeat_interleave(lowerCamelCase__ , dim=0 ) if do_classifier_free_guidance: a__ : List[str] if negative_prompt is None: a__ : Optional[int] = [""] * batch_size elif type(lowerCamelCase__ ) is not type(lowerCamelCase__ ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase__ )} !=''' f''' {type(lowerCamelCase__ )}.''' ) elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : Dict = [negative_prompt] elif batch_size != len(lowerCamelCase__ ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase__ )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: a__ : Any = negative_prompt a__ : Any = self.tokenizer( lowerCamelCase__ , padding="max_length" , max_length=77 , truncation=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors="pt" , ) a__ : Dict = uncond_input.input_ids.to(lowerCamelCase__ ) a__ : Dict = uncond_input.attention_mask.to(lowerCamelCase__ ) a__, a__ : Any = self.text_encoder( input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method a__ : Tuple = negative_prompt_embeds.shape[1] a__ : List[Any] = negative_prompt_embeds.repeat(1 , lowerCamelCase__ ) a__ : Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCamelCase__ ) a__ : List[str] = uncond_text_encoder_hidden_states.shape[1] a__ : Dict = uncond_text_encoder_hidden_states.repeat(1 , lowerCamelCase__ , 1 ) a__ : List[str] = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , lowerCamelCase__ , -1 ) a__ : List[str] = uncond_text_mask.repeat_interleave(lowerCamelCase__ , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a__ : List[str] = torch.cat([negative_prompt_embeds, prompt_embeds] ) a__ : Union[str, Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) a__ : str = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str]=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) a__ : str = torch.device(f'''cuda:{gpu_id}''' ) a__ : Dict = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : int , lowerCamelCase__ : str=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) a__ : List[str] = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=lowerCamelCase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a__ : Optional[int] = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: a__, a__ : Optional[Any] = cpu_offload_with_hook(lowerCamelCase__ , lowerCamelCase__ , prev_module_hook=lowerCamelCase__ ) if self.safety_checker is not None: a__, a__ : List[Any] = cpu_offload_with_hook(self.safety_checker , lowerCamelCase__ , prev_module_hook=lowerCamelCase__ ) # We'll offload the last model manually. 
a__ : Optional[int] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _UpperCamelCase( self : Union[str, Any] ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase__ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase__ ) def __call__( self : List[Any] , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase__ : Optional[Union[str, List[str]]] = None , lowerCamelCase__ : int = 512 , lowerCamelCase__ : int = 512 , lowerCamelCase__ : int = 100 , lowerCamelCase__ : float = 4.0 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : int = 1 elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : List[str] = len(lowerCamelCase__ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase__ )}''' ) a__ : Dict = self._execution_device a__ : List[str] = batch_size * num_images_per_prompt a__ : Tuple = guidance_scale > 1.0 a__, a__, a__ : Optional[int] = self._encode_prompt( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : List[Any] = torch.cat(lowerCamelCase__ , dim=0 ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : int = torch.cat(lowerCamelCase__ , dim=0 ) if do_classifier_free_guidance: a__ : Any = image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 ) a__ : Any = negative_image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 ) a__ : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=lowerCamelCase__ ) self.scheduler.set_timesteps(lowerCamelCase__ , device=lowerCamelCase__ ) a__ : Union[str, Any] = self.scheduler.timesteps a__ : Any = self.unet.config.in_channels a__, a__ : Optional[int] = get_new_h_w(lowerCamelCase__ , lowerCamelCase__ , self.movq_scale_factor ) # create initial latent a__ : Tuple = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ): # expand the latents if we are doing classifier free guidance a__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a__ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} a__ : Dict = self.unet( sample=lowerCamelCase__ , timestep=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , added_cond_kwargs=lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0] if do_classifier_free_guidance: a__, a__ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) a__, a__ : Tuple = noise_pred.chunk(2 ) a__, a__ : Dict = variance_pred.chunk(2 ) a__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - 
noise_pred_uncond) a__ : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a__, a__ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a__ : Tuple = self.scheduler.step( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample # post-processing a__ : Any = self.movq.decode(lowerCamelCase__ , force_not_quantize=lowerCamelCase__ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a__ : Optional[Any] = image * 0.5 + 0.5 a__ : List[str] = image.clamp(0 , 1 ) a__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a__ : Tuple = self.numpy_to_pil(lowerCamelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase__ )
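# Standalone check of the latent-size helper defined at the top of this file
# (`get_new_h_w` is the name used in the released Kandinsky pipeline; treat the
# direct call below as an illustrative sketch, not part of the original file):
def get_new_h_w(h: int, w: int, scale_factor: int = 8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


print(get_new_h_w(768, 768))  # (96, 96): the latent grid for a 768x768 image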
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
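# Illustrative invocation (paths are assumptions; "convert" is the sub-command
# registered above via `parser.add_parser("convert", ...)`):
#
#   datasets-cli convert --tfds_path ./my_tfds_dataset/ --datasets_directory ./hf_datasets/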
37
1
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
37
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
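# Hedged usage sketch (not part of the original file): once these filesystem
# classes are registered with fsspec (the `datasets` library does this on
# import), a compressed member can be read via the chained-URL syntax shown
# in the protocol comment above. The local archive path is an assumption.
import datasets  # noqa: F401  (registers the compression filesystems)
import fsspec

with fsspec.open("gzip://file.txt::./file.txt.gz", mode="rb") as f:
    first_bytes = f.read(16)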
37
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
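# Quick usage sketch (illustrative, not part of the original file): the
# defaults reproduce the 7B-style geometry, and `rotary` is simply the
# complement of `alibi`.
config = FalconConfig()
assert config.head_dim == 4_544 // 71 == 64
assert config.rotary is True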
37
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument("--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
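# Illustrative invocation (the script filename and paths are assumptions):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints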
37
1
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def UpperCamelCase_ ( __a , __a , __a ) -> Any: a__ : Optional[Any] = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLEU scores as follows: # "pair": [fairseq, transformers] a__ : Dict = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } a__ : Tuple = f'''{src_lang}-{tgt_lang}''' a__ : int = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation. All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle inputs with repeated sub-phrases well, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (`model4.pt`).
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(__a , exist_ok=__a ) a__ : Union[str, Any] = os.path.join(__a , "README.md" ) print(f'''Generating {path}''' ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project UpperCamelCase : Tuple = Path(__file__).resolve().parent.parent.parent UpperCamelCase : Optional[int] = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = model_name.split("""-""") UpperCamelCase : Dict = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
37
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
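The sequence-length bookkeeping in the tester above follows the standard conv-style formula per axis, (dim - patch_size) // stride + 1, plus two special tokens. Worked through with the tester defaults (plain Python, no transformers needed):

patch_size, max_length, num_mel_bins = 2, 24, 16
frequency_stride = time_stride = 2

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1              # 12
num_patches = frequency_out * time_out                               # 96
seq_length = num_patches + 2  # +2 for the [CLS] and distillation tokens
assert (frequency_out, time_out, seq_length) == (8, 12, 98)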
37
1
def UpperCamelCase_ ( __a ) -> "list[int]": if upper_limit < 0: raise ValueError("Limit for the Catalan sequence must be ≥ 0" ) a__ : List[str] = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 a__ : Optional[int] = 1 if upper_limit > 0: a__ : Optional[Any] = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(__a ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: UpperCamelCase : Optional[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(f"""The Catalan numbers from 0 through {N} are:""") print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
37
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
37
1
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class A__ ( A__ ): """simple docstring""" def __init__( self : Any ): a__ : List[Any] = [] def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): self.events.append("on_init_end" ) def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ): self.events.append("on_train_begin" ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str , **lowerCamelCase__ : Tuple ): self.events.append("on_train_end" ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : str , lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ): self.events.append("on_epoch_begin" ) def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ): self.events.append("on_epoch_end" ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : int ): self.events.append("on_step_begin" ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Optional[Any] ): self.events.append("on_step_end" ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any , **lowerCamelCase__ : int ): self.events.append("on_evaluate" ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Dict ): self.events.append("on_predict" ) def _UpperCamelCase( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : str , **lowerCamelCase__ : int ): self.events.append("on_save" ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[int] ): self.events.append("on_log" ) def _UpperCamelCase( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , **lowerCamelCase__ : List[str] ): self.events.append("on_prediction_step" ) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[Any] ): a__ : str = tempfile.mkdtemp() def _UpperCamelCase( self : str ): shutil.rmtree(self.output_dir ) def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : Optional[Any]=64 , lowerCamelCase__ : Union[str, Any]=64 , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Dict=False , **lowerCamelCase__ : Optional[Any] ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. 
We make sure # its set to False since the tests later on depend on its value. a__ : Optional[Any] = RegressionDataset(length=lowerCamelCase__ ) a__ : Union[str, Any] = RegressionDataset(length=lowerCamelCase__ ) a__ : Dict = RegressionModelConfig(a=lowerCamelCase__ , b=lowerCamelCase__ ) a__ : List[str] = RegressionPreTrainedModel(lowerCamelCase__ ) a__ : int = TrainingArguments(self.output_dir , disable_tqdm=lowerCamelCase__ , report_to=[] , **lowerCamelCase__ ) return Trainer( lowerCamelCase__ , lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , callbacks=lowerCamelCase__ , ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] ): self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) ) # Order doesn't matter a__ : Union[str, Any] = sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : cb.__name__ if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cb.__class__.__name__ ) a__ : Optional[Any] = sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : cb.__name__ if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cb.__class__.__name__ ) for cba, cba in zip(lowerCamelCase__ , lowerCamelCase__ ): if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertEqual(lowerCamelCase__ , cba.__class__ ) elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertEqual(cba.__class__ , lowerCamelCase__ ) else: self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str ): a__ : Optional[int] = ["on_init_end", "on_train_begin"] a__ : Dict = 0 a__ : List[Any] = len(trainer.get_eval_dataloader() ) a__ : Dict = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(lowerCamelCase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _UpperCamelCase( self : str ): a__ : str = self.get_trainer() a__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) # Callbacks passed at init are added to the default callbacks a__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowerCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback a__ : Any = self.get_trainer(disable_tqdm=lowerCamelCase__ ) a__ : Dict = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) def _UpperCamelCase( 
self : Tuple ): a__ : List[str] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] a__ : str = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowerCamelCase__ ) expected_callbacks.remove(lowerCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) a__ : List[Any] = self.get_trainer() a__ : Dict = trainer.pop_callback(lowerCamelCase__ ) self.assertEqual(cb.__class__ , lowerCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) trainer.add_callback(lowerCamelCase__ ) expected_callbacks.insert(0 , lowerCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) # We can also add, pop, or remove by instance a__ : Union[str, Any] = self.get_trainer() a__ : Optional[int] = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowerCamelCase__ ) expected_callbacks.remove(lowerCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) a__ : Dict = self.get_trainer() a__ : List[Any] = trainer.callback_handler.callbacks[0] a__ : Optional[int] = trainer.pop_callback(lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) trainer.add_callback(lowerCamelCase__ ) expected_callbacks.insert(0 , lowerCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCamelCase__ ) def _UpperCamelCase( self : str ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=lowerCamelCase__ ) a__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() a__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) ) # Independent log/save/eval a__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() a__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) ) a__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() a__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) ) a__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() a__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) ) a__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() a__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) ) # A bit of everything a__ : List[Any] = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() a__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowerCamelCase__ , self.get_expected_events(lowerCamelCase__ ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: a__ : List[str] = self.get_trainer( callbacks=[MyTestTrainerCallback, 
MyTestTrainerCallback] , ) assert str(lowerCamelCase__ ) in warn_mock.call_args[0][0]
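The expected-event list built by get_expected_events above reduces to modular arithmetic over steps. A standalone sketch for hypothetical settings (one epoch of four steps, logging_steps=2, save_steps=4, no evaluation); no Trainer involved:

events = ["on_init_end", "on_train_begin", "on_epoch_begin"]
logging_steps, save_steps, num_steps = 2, 4, 4
for step in range(1, num_steps + 1):
    events += ["on_step_begin", "on_step_end"]
    if step % logging_steps == 0:
        events.append("on_log")
    if step % save_steps == 0:
        events.append("on_save")
events += ["on_epoch_end", "on_log", "on_train_end"]
print(events)  # the order a recording callback like MyTestTrainerCallback should see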
37
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
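The key-renaming pass in this conversion is pure string rewriting, so it can be checked in isolation. A minimal sketch of just that pass, applied to illustrative keys (not real checkpoint keys):

def rename(key: str) -> str:
    # same order of substitutions as the conversion loop above
    if key.startswith("Qformer.bert"):
        key = key.replace("Qformer.bert", "qformer")
    if "attention.self" in key:
        key = key.replace("self", "attention")
    if "llm_proj" in key:
        key = key.replace("llm_proj", "language_projection")
    if "t5_proj" in key:
        key = key.replace("t5_proj", "language_projection")
    if key.startswith("llm_model"):
        key = key.replace("llm_model", "language_model")
    if key.startswith("t5"):
        key = key.replace("t5", "language")
    return key

assert rename("Qformer.bert.encoder.layer.0.attention.self.query.weight") == (
    "qformer.encoder.layer.0.attention.attention.query.weight"
)
assert rename("t5_proj.weight") == "language_projection.weight"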
37
1
import copy import random from transformers import CLIPTokenizer class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ): super().__init__(*lowerCamelCase__ , **lowerCamelCase__ ) a__ : Dict = {} def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : List[str] ): a__ : Optional[Any] = super().add_tokens(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) if num_added_tokens == 0: raise ValueError( f'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' " `placeholder_token` that is not already in the tokenizer." ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Dict , *lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any=1 , **lowerCamelCase__ : List[str] ): a__ : int = [] if num_vec_per_token == 1: self.try_adding_tokens(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) output.append(lowerCamelCase__ ) else: a__ : str = [] for i in range(lowerCamelCase__ ): a__ : List[str] = placeholder_token + f'''_{i}''' self.try_adding_tokens(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) output.append(lowerCamelCase__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f'''The tokenizer already has placeholder token {token} that can get confused with''' f''' {placeholder_token}; keep placeholder tokens independent''' ) a__ : List[Any] = output def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : int=1.0 ): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : Any = [] for i in range(len(lowerCamelCase__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: a__ : Any = self.token_map[placeholder_token] a__ : Optional[Any] = tokens[: 1 + int(len(lowerCamelCase__ ) * prop_tokens_to_load )] if vector_shuffle: a__ : str = copy.copy(lowerCamelCase__ ) random.shuffle(lowerCamelCase__ ) a__ : Union[str, Any] = text.replace(lowerCamelCase__ , " ".join(lowerCamelCase__ ) ) return text def __call__( self : str , lowerCamelCase__ : Union[str, Any] , *lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : List[str]=1.0 , **lowerCamelCase__ : Tuple ): return super().__call__( self.replace_placeholder_tokens_in_text( lowerCamelCase__ , vector_shuffle=lowerCamelCase__ , prop_tokens_to_load=lowerCamelCase__ ) , *lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Any , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : Any=1.0 , **lowerCamelCase__ : List[str] ): return super().encode( self.replace_placeholder_tokens_in_text( lowerCamelCase__ , vector_shuffle=lowerCamelCase__ , prop_tokens_to_load=lowerCamelCase__ ) , *lowerCamelCase__ , **lowerCamelCase__ , )
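What the multi-vector path above does to input text is easiest to see in isolation: a placeholder registered with num_vec_per_token=3 is expanded into a numbered family of tokens before tokenization. A standalone sketch with a hypothetical token_map (the real one is built by the add-placeholder method):

token_map = {"<cat>": [f"<cat>_{i}" for i in range(3)]}  # hypothetical mapping

def replace_placeholders(text: str) -> str:
    # mirrors replace_placeholder_tokens_in_text without shuffling/truncation
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            text = text.replace(placeholder, " ".join(tokens))
    return text

print(replace_placeholders("a photo of <cat>"))
# a photo of <cat>_0 <cat>_1 <cat>_2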
37
def UpperCamelCase_ ( __a , __a ) -> Tuple: a__ : Optional[int] = [0 for i in range(r + 1 )] # nc0 = 1 a__ : Union[str, Any] = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. a__ : Any = min(__a , __a ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
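Restated with readable names, the rolling-row update above is Pascal's rule c[j] += c[j - 1] applied right to left, so each row of the triangle overwrites the previous one in place. A quick self-check (hypothetical function name):

def binomial(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        for j in range(min(i, r), 0, -1):  # right to left, as in the while loop above
            c[j] += c[j - 1]
    return c[r]

assert binomial(10, 5) == 252  # matches the printed value above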
37
1
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError:
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
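A hypothetical usage sketch for the gzip variant above, assuming the class is importable as GzipFileSystem; it is registered under its protocol and read through fsspec's chained "::" URL syntax, the same shape as the gzip://file.txt::http://... example in the class comment:

import gzip
import fsspec

with gzip.open("sample.txt.gz", "wb") as f:  # small fixture for the demo
    f.write(b"hello")

# assumption: GzipFileSystem is the gzip subclass defined above
fsspec.register_implementation("gzip", GzipFileSystem, clobber=True)
with fsspec.open("gzip://sample.txt::file://sample.txt.gz", mode="rb") as f:
    print(f.read())  # b'hello'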
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
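# --- Usage sketch (not part of the original file) ---
# A hedged example of what the `_pad` override above does: because 0 in
# `global_attention_mask` already means "local attention", padding positions
# are filled with -1 instead. Assumes `transformers` is installed and the
# checkpoint can be fetched from the Hub.
from transformers import LEDTokenizerFast

led_tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
batch = led_tokenizer(["short text", "a noticeably longer piece of text"])
# Give the first token of each sequence global attention (1 = global, 0 = local).
batch["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]]
padded = led_tokenizer.pad(batch, padding="longest")
# Each row of padded["global_attention_mask"] now has equal length, padded with -1.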
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def UpperCamelCase_ ( __a , __a , __a , __a , __a ) -> Tuple: # Load configuration defined in the metadata file with open(__a ) as metadata_file: a__ : Optional[int] = json.load(__a ) a__ : str = LukeConfig(use_entity_aware_attention=__a , **metadata["model_config"] ) # Load in the weights from the checkpoint_path a__ : List[Any] = torch.load(__a , map_location="cpu" )["module"] # Load the entity vocab file a__ : Union[str, Any] = load_original_entity_vocab(__a ) # add an entry for [MASK2] a__ : List[Any] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 a__ : Optional[int] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks a__ : Union[str, Any] = AddedToken("<ent>" , lstrip=__a , rstrip=__a ) a__ : Dict = AddedToken("<ent2>" , lstrip=__a , rstrip=__a ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__a ) with open(os.path.join(__a , "tokenizer_config.json" ) , "r" ) as f: a__ : int = json.load(__a ) a__ : List[str] = "MLukeTokenizer" with open(os.path.join(__a , "tokenizer_config.json" ) , "w" ) as f: json.dump(__a , __a ) with open(os.path.join(__a , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(__a , __a ) a__ : Dict = MLukeTokenizer.from_pretrained(__a ) # Initialize the embeddings of the special tokens a__ : Dict = tokenizer.convert_tokens_to_ids(["@"] )[0] a__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(["#"] )[0] a__ : List[str] = state_dict["embeddings.word_embeddings.weight"] a__ : Optional[int] = word_emb[ent_init_index].unsqueeze(0 ) a__ : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 ) a__ : List[Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: a__ : str = state_dict[bias_name] a__ : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 ) a__ : str = decoder_bias[enta_init_index].unsqueeze(0 ) a__ : Tuple = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: a__ : Optional[Any] = f'''encoder.layer.{layer_index}.attention.self.''' a__ : List[str] = state_dict[prefix + matrix_name] a__ : str = state_dict[prefix + matrix_name] a__ : Any = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks a__ : List[Any] = state_dict["entity_embeddings.entity_embeddings.weight"] a__ : Union[str, Any] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) a__ : List[Any] = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' a__ : List[str] = state_dict["entity_predictions.bias"] a__ : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) a__ : Any = torch.cat([entity_prediction_bias, entity_mask_bias] ) a__ : int = LukeForMaskedLM(config=__a ).eval() state_dict.pop("entity_predictions.decoder.weight" ) 
state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) a__ : Dict = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): a__ : Union[str, Any] = state_dict[key] else: a__ : Dict = state_dict[key] a__, a__ : List[str] = model.load_state_dict(__a , strict=__a ) if set(__a ) != {"luke.embeddings.position_ids"}: raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(__a ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs a__ : int = MLukeTokenizer.from_pretrained(__a , task="entity_classification" ) a__ : int = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." a__ : Optional[Any] = (0, 9) a__ : int = tokenizer(__a , entity_spans=[span] , return_tensors="pt" ) a__ : Optional[int] = model(**__a ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base a__ : List[Any] = torch.Size((1, 33, 768) ) a__ : int = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base a__ : str = torch.Size((1, 1, 768) ) a__ : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __a , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction a__ : int = MLukeTokenizer.from_pretrained(__a ) a__ : int = "Tokyo is the capital of <mask>." 
a__ : Any = (24, 30) a__ : Optional[int] = tokenizer(__a , entity_spans=[span] , return_tensors="pt" ) a__ : Union[str, Any] = model(**__a ) a__ : Any = encoding["input_ids"][0].tolist() a__ : int = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) a__ : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(__a ) a__ : List[Any] = outputs.entity_logits[0][0].argmax().item() a__ : Tuple = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(__a ) ) model.save_pretrained(__a ) def UpperCamelCase_ ( __a ) -> int: a__ : str = ["[MASK]", "[PAD]", "[UNK]"] a__ : List[Any] = [json.loads(__a ) for line in open(__a )] a__ : Optional[Any] = {} for entry in data: a__ : Optional[int] = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: a__ : Dict = entity_id break a__ : Union[str, Any] = f'''{language}:{entity_name}''' a__ : Union[str, Any] = entity_id return new_mapping if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""") parser.add_argument( """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration.""" ) parser.add_argument( """--entity_vocab_path""", default=None, type=str, help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model.""" ) parser.add_argument( """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted.""" ) UpperCamelCase : Tuple = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
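# --- Usage sketch (not part of the original script) ---
# Hedged example: once the conversion script has run, the dump folder can be
# loaded like any other checkpoint. The folder name is a placeholder.
from transformers import LukeForMaskedLM, MLukeTokenizer

mluke_tokenizer = MLukeTokenizer.from_pretrained("converted-mluke-base")
mluke_model = LukeForMaskedLM.from_pretrained("converted-mluke-base")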
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
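# --- Usage sketch (not part of the original file) ---
# Hedged example of the `add_prefix_space` requirement enforced by the
# `_batch_encode_plus`/`_encode_plus` overrides above: pretokenized input
# (is_split_into_words=True) is only accepted when add_prefix_space=True.
from transformers import RobertaTokenizerFast

roberta_tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
encoding = roberta_tokenizer(["Hello", "world"], is_split_into_words=True)
# With add_prefix_space=False the same call would trip the assertion above.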
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() UpperCamelCase : Dict = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, 
TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def UpperCamelCase_ ( __a , __a , __a , __a , __a=False , __a=True ) -> Tuple: if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) a__, a__, a__, a__ : List[str] = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: a__ : Optional[Any] = cached_file(__a , __a , force_download=not use_cached_models ) a__ : Tuple = config_class.from_json_file(__a ) a__ : List[Any] = True a__ : Optional[int] = True print(f'''Building TensorFlow model from configuration: {config}''' ) a__ : List[str] = model_class(__a ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): a__ : int = cached_file( __a , __a , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: a__ : List[str] = load_pytorch_checkpoint_in_tfa_model(__a , __a ) if compare_with_pt_model: a__ : Optional[int] = tf_model(tf_model.dummy_inputs , training=__a ) # build the network a__ : Union[str, Any] = torch.load(__a , map_location="cpu" ) a__ : Optional[Any] = pt_model_class.from_pretrained( pretrained_model_name_or_path=__a , config=__a , state_dict=__a ) with torch.no_grad(): a__ : Dict = pt_model(**pt_model.dummy_inputs ) a__ : str = pto[0].numpy() a__ : Union[str, Any] = tfo[0].numpy() a__ : Any = np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(__a , save_format="h5" ) def UpperCamelCase_ ( __a , __a , __a=None , __a=None , __a=False , __a=False , __a=False , __a=False , ) -> Dict: if args_model_type is None: a__ : str = list(MODEL_CLASSES.keys() ) else: a__ : int = [args_model_type] for j, model_type in enumerate(__a , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(__a )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) a__, a__, a__, a__, a__ : List[Any] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: a__ : int = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: a__ : Optional[Any] = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__a , __a ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue a__ : Any = model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( 
f''' Converting checkpoint {i}/{len(__a )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: a__ : Dict = cached_file(__a , __a , force_download=not use_cached_models ) else: a__ : str = config_shortcut_name if model_shortcut_name in aws_model_maps: a__ : List[str] = cached_file(__a , __a , force_download=not use_cached_models ) else: a__ : Optional[Any] = model_shortcut_name if os.path.isfile(__a ): a__ : int = "converted_model" convert_pt_checkpoint_to_tf( model_type=__a , pytorch_checkpoint_path=__a , config_file=__a , tf_dump_path=os.path.join(__a , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=__a , ) if remove_cached_files: os.remove(__a ) os.remove(__a ) if __name__ == "__main__": UpperCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") UpperCamelCase : List[Any] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
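# --- Usage sketch (not part of the original script) ---
# Hedged example of converting a single local checkpoint; all paths are
# placeholders, and the keyword names mirror the internal call made by
# convert_all_pt_checkpoints_to_tf above.
#
# convert_pt_checkpoint_to_tf(
#     model_type="bert",
#     pytorch_checkpoint_path="pytorch_model.bin",
#     config_file="config.json",
#     tf_dump_path="tf_model.h5",
#     compare_with_pt_model=True,
# )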
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Min-max normalization: rescale `data` onto the [0, 1] range."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Z-score standardization: zero mean, unit sample standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
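# --- Worked example (not part of the original file) ---
# For [2, 4, 6, 8, 10]: min-max maps the range onto [0, 1]; z-scores divide by
# the sample standard deviation from `statistics.stdev` (sqrt(10) ~ 3.162 here).
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0, 10.0]
    print(normalization(sample))   # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(sample))  # [-1.265, -0.632, 0.0, 0.632, 1.265]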
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration wrapper for a multimodal (MMBT-style) model: it copies the
    attributes of an existing text-model config and adds the modal hidden size.
    The original class and attribute names were obfuscated; this is a
    best-effort reconstruction."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2_048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
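# --- Usage sketch (not part of the original file) ---
# Hedged example: the wrapper copies an existing text config's attributes and
# tacks on the modal hidden size. `BertConfig` is just an illustrative
# stand-in for any transformer config object.
#
# from transformers import BertConfig
# config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)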
def solution(length: int = 50) -> int:
    """Count the ways to replace grey squares in a row of `length` with red
    (length-2), green (length-3) or blue (length-4) tiles, using one colour per
    tiling and at least one tile (this matches Project Euler problem 116).
    """
    # different_colour_ways_number[row_length][tile_length - 2] holds the count
    # for rows of `row_length` using at least one tile of that length.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
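# --- Worked example (not part of the original file) ---
# For a row of length 5 the counts are 7 (red), 3 (green) and 2 (blue), which
# is the example stated in Project Euler problem 116:
assert solution(5) == 12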
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) UpperCamelCase : Optional[int] = logging.getLogger() UpperCamelCase : Union[str, Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Tuple ): os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) a__ : Optional[Any] = {"source": "What is love ?", "target": "life"} a__ : int = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: a__ : Any = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(lowerCamelCase__ , f'''{split}.{field}''' ) , "w" ) as f: f.write(lowerCamelCase__ ) def _UpperCamelCase( self : int , lowerCamelCase__ : int , lowerCamelCase__ : str = "pytorch" ): a__ : List[Any] = self.get_auto_remove_tmp_dir() a__ : Dict = os.path.join(lowerCamelCase__ , "output" ) a__ : List[Any] = os.path.join(lowerCamelCase__ , "data" ) self._create_dummy_data(data_dir=lowerCamelCase__ ) a__ : Optional[Any] = f''' --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ '''.split() if gpus > 0: testargs.append(f'''--gpus={gpus}''' ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) a__ : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(lowerCamelCase__ , env=self.get_env() ) a__ : List[Any] = os.path.join(lowerCamelCase__ , "metrics.json" ) with open(lowerCamelCase__ ) as f: a__ : str = json.load(lowerCamelCase__ ) return result @require_torch_gpu def _UpperCamelCase( self : List[str] ): a__ : List[Any] = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def _UpperCamelCase( self : Optional[Any] ): a__ : Union[str, Any] = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def _UpperCamelCase( self : List[str] ): a__ : int = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def _UpperCamelCase( self : List[Any] ): a__ : str = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
class Things:
    def __init__(self, name: str, value: float, weight: float):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort best-first by the chosen key (value, weight, or value/weight ratio),
    # then take items greedily while they still fit under max_cost.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    # Stub kept from the original file.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
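# --- Worked example (not part of the original file) ---
# Greedy by value with a weight budget of 100: Pizza (value 100, weight 60) is
# taken first, then Burger (80, 40); Coca Cola no longer fits.
foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 40])
chosen, total_value = greedy(foods, 100.0, Things.get_value)
assert total_value == 180.0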
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n
    (Project Euler problem 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
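# --- Worked example (not part of the original file) ---
# Project Euler 5's stated example: 2520 is the smallest number divisible by
# every integer from 1 to 10.
assert greatest_common_divisor(12, 18) == 6
assert lcm(4, 6) == 12
assert solution(10) == 2520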
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
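# --- Usage sketch (not part of the original file) ---
# Hedged round trip through SQLite; the table and file names are placeholders.
# `Dataset.to_sql` is backed by the writer above and `Dataset.from_sql` by the
# reader, assuming the installed `datasets` version exposes both (and that
# `sqlalchemy` is available for the URI form).
import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
with sqlite3.connect("data.db") as con:
    ds.to_sql("my_table", con)
reloaded = Dataset.from_sql("my_table", "sqlite:///data.db")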
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed UpperCamelCase : int = { """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), """bert""": (BertConfig, BertForMaskedLM, BertTokenizer), """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def UpperCamelCase_ ( __a ) -> List[Any]: assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def UpperCamelCase_ ( __a , __a ) -> int: if args.student_type == "roberta": a__ : str = False elif args.student_type == "gpt2": a__ : List[str] = False def UpperCamelCase_ ( __a , __a ) -> Optional[int]: if args.student_type == "roberta": a__ : Union[str, Any] = False def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=__a , required=__a , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=__a , required=__a , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=__a , choices=["distilbert", "roberta", "gpt2"] , required=__a , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=__a , required=__a , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=__a , type=__a , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=__a , help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name" , type=__a , required=__a , help="The teacher model." ) parser.add_argument("--temperature" , default=2.0 , type=__a , help="Temperature for the softmax temperature." ) parser.add_argument( "--alpha_ce" , default=0.5 , type=__a , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=__a , help="Linear weight for the MLM loss. Must be >=0. 
Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=__a , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=__a , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=__a , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=__a , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=__a , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=__a , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=__a , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=__a , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=__a , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=__a , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=__a , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=__a , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=__a , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=__a , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5e-4 , type=__a , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1e-6 , type=__a , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=__a , help="Max gradient norm." ) parser.add_argument("--initializer_range" , default=0.02 , type=__a , help="Random initialization range." ) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=__a , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=__a , default=1 , help="Number of GPUs in the node." 
) parser.add_argument("--local_rank" , type=__a , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=__a , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=__a , default=500 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=__a , default=4_000 , help="Checkpoint interval." ) a__ : List[str] = parser.parse_args() sanity_checks(__a ) # ARGS # init_gpu_params(__a ) set_seed(__a ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(__a ) , __a , indent=4 ) git_log(args.dump_path ) a__, a__, a__ : Dict = MODEL_CLASSES[args.student_type] a__, a__, a__ : List[Any] = MODEL_CLASSES[args.teacher_type] # TOKENIZER # a__ : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) a__ : Union[str, Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): a__ : Optional[Any] = tokenizer.all_special_tokens.index(__a ) a__ : Union[str, Any] = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) a__ : List[str] = special_tok_ids a__ : Optional[Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , "rb" ) as fp: a__ : Tuple = pickle.load(__a ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , "rb" ) as fp: a__ : Optional[Any] = pickle.load(__a ) a__ : Dict = np.maximum(__a , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): a__ : List[Any] = 0.0 # do not predict special tokens a__ : List[str] = torch.from_numpy(__a ) else: a__ : Any = None a__ : Any = LmSeqsDataset(params=__a , data=__a ) logger.info("Data loader created." ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) a__ : List[str] = student_config_class.from_pretrained(args.student_config ) a__ : Optional[Any] = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) a__ : List[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a ) else: a__ : List[Any] = student_model_class(__a ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info("Student loaded." 
) # TEACHER # a__ : int = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__a , __a ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__a , __a ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() a__ : List[Any] = Distiller( params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of (Western) Easter for a given year using Gauss's
    algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
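# --- Worked example (not part of the original file) ---
# Sanity check against two well-known Western Easter dates.
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2021) == datetime(2021, 4, 4)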
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
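# --- Usage sketch (not part of the original script) ---
# Hedged example with placeholder paths; it mirrors the CLI entry point above.
#
# convert_tf_checkpoint_to_pytorch(
#     "t5/checkpoint",   # --tf_checkpoint_path
#     "t5/config.json",  # --config_file
#     "converted-t5",    # --pytorch_dump_path
# )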
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
37
1
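The first row above exercises 8-bit loading and then bolts a hand-rolled low-rank adapter (the LoRALayer class) onto the frozen model. A minimal, self-contained sketch of that adapter pattern outside the test harness; the bias=False choice and the rank are illustrative, since the obfuscated sample does not preserve them:

import torch.nn as nn


class LoRALayer(nn.Module):
    """Wrap a frozen linear module and add a trainable low-rank bypass."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module  # the frozen base layer, left untouched
        # Two thin linear maps form a rank-`rank` additive update.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)  # zero init: no change at step 0
        self.adapter.to(module.weight.device)

    def forward(self, x, *args, **kwargs):
        # Base output plus the low-rank correction; only the adapter trains.
        return self.module(x, *args, **kwargs) + self.adapter(x)

In the sample's training test, every q_proj/k_proj/v_proj of the OPT attention blocks is replaced by such a wrapper, all base parameters are frozen (with 1-D tensors cast to fp32 for stability), and only the adapter weights receive gradients.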
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED UpperCamelCase : List[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Optional[Any] = { """allenai/led-base-16384""": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def UpperCamelCase_ ( ) -> Union[str, Any]: a__ : Optional[int] = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) a__ : Dict = bs[:] a__ : Tuple = 0 for b in range(2**8 ): if b not in bs: bs.append(__a ) cs.append(2**8 + n ) n += 1 a__ : Any = [chr(__a ) for n in cs] return dict(zip(__a , __a ) ) def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = set() a__ : List[str] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) a__ : Dict = char return pairs class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict="replace" , lowerCamelCase__ : int="<s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="</s>" , lowerCamelCase__ : Dict="<s>" , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Optional[int]="<pad>" , lowerCamelCase__ : Dict="<mask>" , lowerCamelCase__ : Optional[int]=False , **lowerCamelCase__ : int , ): a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token a__ : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token a__ : Dict = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token a__ : Tuple = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token super().__init__( errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , ) with open(lowerCamelCase__ , encoding="utf-8" ) as vocab_handle: a__ : List[str] = json.load(lowerCamelCase__ ) a__ : List[str] = {v: k for k, v in self.encoder.items()} a__ : int = errors # how to handle errors in decoding a__ : List[str] = bytes_to_unicode() a__ : int = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase__ , encoding="utf-8" ) as merges_handle: a__ : List[str] = merges_handle.read().split("\n" )[1:-1] a__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] a__ : Tuple = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Union[str, Any] = {} a__ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions a__ : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _UpperCamelCase( self : Dict ): return len(self.encoder ) def _UpperCamelCase( self : Tuple ): return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] ): if token in self.cache: return self.cache[token] a__ : str = tuple(lowerCamelCase__ ) a__ : Dict = get_pairs(lowerCamelCase__ ) if not pairs: return token while True: a__ : List[str] = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break a__, a__ : Union[str, Any] = bigram a__ : Any = [] a__ : Union[str, Any] = 0 while i < len(lowerCamelCase__ ): try: a__ : Dict = word.index(lowerCamelCase__ , lowerCamelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) a__ : List[str] = j if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 a__ : Tuple = tuple(lowerCamelCase__ ) a__ : Union[str, Any] = new_word if len(lowerCamelCase__ ) == 1: break else: a__ : str = get_pairs(lowerCamelCase__ ) a__ : Union[str, Any] = " ".join(lowerCamelCase__ ) a__ : Union[str, Any] = word return word def _UpperCamelCase( self : int , lowerCamelCase__ : Tuple ): a__ : Optional[int] = [] for token in re.findall(self.pat , lowerCamelCase__ ): a__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(" " ) ) return bpe_tokens def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any ): return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.decoder.get(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Any ): a__ : Optional[Any] = "".join(lowerCamelCase__ ) a__ : Optional[int] = 
bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): if not os.path.isdir(lowerCamelCase__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return a__ : Dict = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) a__ : List[Any] = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + "\n" ) a__ : Dict = 0 with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) a__ : List[Any] = token_index writer.write(" ".join(lowerCamelCase__ ) + "\n" ) index += 1 return vocab_file, merge_file def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a__ : str = [self.cls_token_id] a__ : List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _UpperCamelCase( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1] def _UpperCamelCase( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : int = [self.sep_token_id] a__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int]=False , **lowerCamelCase__ : List[Any] ): a__ : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()): a__ : List[Any] = " " + text return (text, kwargs) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : Dict = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : int = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ 
: Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. a__ : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : int = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Dict = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
37
1
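The second row pairs the LED byte-level BPE tokenizer with a BEiT model test. The tokenizer's two small helpers are garbled by the systematic renaming; a cleaned-up sketch of what they compute:

from functools import lru_cache


@lru_cache()
def bytes_to_unicode() -> dict:
    # Map every byte 0..255 to a printable unicode character, so BPE can work
    # on reversible "characters" instead of raw bytes.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # park non-printable bytes in a private range
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))


def get_pairs(word: tuple) -> set:
    # All adjacent symbol pairs; the BPE loop repeatedly merges the
    # lowest-ranked pair until no known merge remains.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

For example, get_pairs(("h", "e", "l", "l", "o")) returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; the merge loop in the sample's bpe() then collapses the pair with the lowest merge rank and repeats.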
def solution() -> int:
    # Project Euler 9: the only Pythagorean triplet (a, b, c) with a + b + c == 1000.
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"""{solution() = }""")
37
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Tuple = R"\w+[.]\d+" a__ : List[Any] = re.findall(__a , __a ) for pat in pats: a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) ) return key def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : List[str] = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): a__ : Any = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer a__ : List[str] = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer a__ : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": a__ : Tuple = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCamelCase_ ( __a , __a , __a=42 ) -> str: # Step 1: Convert pytorch tensor to numpy a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) ) a__ : Optional[Any] = flatten_dict(__a ) a__ : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): a__ : Optional[int] = rename_key(__a ) a__ : Optional[int] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown a__ : str = jnp.asarray(__a ) return unflatten_dict(__a )
37
1
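The third row combines a Project Euler solution with a PyTorch-to-Flax weight converter. The converter's core is two layout changes: torch Linear weights are stored as (out_features, in_features) while Flax Dense kernels are (in_features, out_features), and torch Conv2d weights (out_ch, in_ch, kH, kW) become Flax HWIO kernels (kH, kW, in_ch, out_ch). A shape-only illustration, with arbitrary sizes:

import numpy as np

pt_linear = np.zeros((128, 64))            # torch Linear: (out, in)
flax_kernel = pt_linear.T                  # Flax Dense kernel: (in, out)

pt_conv = np.zeros((16, 3, 3, 3))          # torch Conv2d: (out, in, kH, kW)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # Flax Conv kernel: (kH, kW, in, out)

assert flax_kernel.shape == (64, 128)
assert flax_conv.shape == (3, 3, 3, 16)

These are exactly the `.T` and `transpose(2, 3, 1, 0)` calls inside rename_key_and_reshape_tensor in the sample above.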
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
37
1
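The fourth row carries the TensorFlow benchmarking entry point in both columns. Assuming the (long-deprecated) benchmark utilities are still importable, the same benchmark can be driven from Python instead of the CLI; the checkpoint id and sizes below are illustrative:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],  # any hub checkpoint id
    batch_sizes=[8],
    sequence_lengths=[32, 128],
)
benchmark = TensorFlowBenchmark(args=args)
results = benchmark.run()  # inference speed/memory results per model and size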
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    # Project Euler 43: check the divisibility properties of the 3-digit substrings.
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"""{solution() = }""")
37
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
37
1
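The fifth row's RAG evaluation script scores each prediction against several acceptable gold answers via metric_max_over_ground_truths. A stripped-down version of that idea; the real exact_match_score normalizes articles and punctuation, while this stand-in only strips and lowercases:

def exact_match_score(prediction: str, ground_truth: str) -> bool:
    return prediction.strip().lower() == ground_truth.strip().lower()


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # A prediction counts if it matches ANY of the acceptable answers.
    return max(metric_fn(prediction, gt) for gt in ground_truths)


assert metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Paris, France"])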
import math


def res(x, y):
    if 0 not in (x, y):
        # We compare magnitudes via log10(x ** y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
37
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    # Build the data_files mapping for datasets.load_dataset from whichever splits were provided.
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # Tokenize either a single text column or a pair of text columns.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
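# ---------------------------------------------------------------------------
# A minimal invocation sketch for the script above (not part of the script
# itself). The script file name, the CSV paths, the checkpoint, and the label
# column index are placeholder assumptions; substitute your own. All flags
# shown map to the dataclass fields above or to TFTrainingArguments:
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv \
#       --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./text-classification-model \
#       --do_train \
#       --do_eval
# ---------------------------------------------------------------------------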
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
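# ---------------------------------------------------------------------------
# A minimal usage sketch for ASTFeatureExtractor outside the test harness; the
# random waveform and the 16 kHz sampling rate are illustrative assumptions,
# and running it requires torchaudio (like the tests above). With default
# settings the extractor pads or truncates every clip to 1024 spectrogram
# frames of 128 mel bins, which is the shape the integration test asserts.
if __name__ == "__main__":
    extractor = ASTFeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # one second of fake audio
    features = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(features.input_values.shape)  # expected: (1, 1024, 128)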
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
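# ---------------------------------------------------------------------------
# A minimal invocation sketch for the evaluation script above; the script name
# and data file paths are placeholder assumptions. Given the SQuAD 2.0 dev set
# and a JSON mapping of {qid: answer_text} predictions, it prints exact-match
# and F1 aggregates (plus HasAns/NoAns breakdowns). All flags shown are
# defined in parse_args above:
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json \
#       --out-file eval.json
# ---------------------------------------------------------------------------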