Dataset schema (column, dtype, observed range):

    code                     string   lengths 87 - 55.2k
    code_codestyle           int64    0 - 349
    style_context            string   lengths 135 - 49.1k
    style_context_codestyle  int64    0 - 349
    label                    int64    0 - 1
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
code_codestyle: 329
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
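A minimal usage sketch of the config row above (an illustration, not part of the dataset row; it assumes a stock `transformers` install whose `CTRLConfig` matches this file):

from transformers import CTRLConfig

config = CTRLConfig()  # defaults as in the row above
assert config.vocab_size == 246534
assert config.hidden_size == config.n_embd == 1280       # resolved via attribute_map
assert config.num_hidden_layers == config.n_layer == 48  # resolved via attribute_map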
style_context_codestyle: 329
label: 1
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of visited vertices."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
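A short usage check for the DFS sample above (illustrative, using the restored names `depth_first_search` and `G`; the example graph is connected, so a traversal from "A" should visit every vertex):

# Illustrative check, not part of the dataset row: DFS from "A" on the
# connected example graph reaches all seven vertices.
visited = depth_first_search(G, "A")
assert visited == {"A", "B", "C", "D", "E", "F", "G"}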
code_codestyle: 329
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
style_context_codestyle: 329
label: 1
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging lowerCAmelCase__ :Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__() if safety_checker is None: logger.warning( f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' ) self.register_modules( speech_model=_SCREAMING_SNAKE_CASE , speech_processor=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> Optional[Any]: """simple docstring""" if slice_size == "auto": _UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" self.enable_attention_slicing(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.speech_processor.feature_extractor( _SCREAMING_SNAKE_CASE , return_tensors='pt' , sampling_rate=_SCREAMING_SNAKE_CASE ).input_features.to(self.device ) _UpperCAmelCase = self.speech_model.generate(_SCREAMING_SNAKE_CASE , max_length=480000 ) _UpperCAmelCase = self.speech_processor.tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , normalize=_SCREAMING_SNAKE_CASE )[ 0 ] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 1 elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is 
{type(_SCREAMING_SNAKE_CASE )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(_SCREAMING_SNAKE_CASE )}.''' ) # get prompt text embeddings _UpperCAmelCase = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) _UpperCAmelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length] _UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = text_embeddings.shape _UpperCAmelCase = text_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 ) _UpperCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _UpperCAmelCase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _UpperCAmelCase = 42 if negative_prompt is None: _UpperCAmelCase = [''] * batch_size elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !=''' f''' {type(_SCREAMING_SNAKE_CASE )}.''' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = [negative_prompt] elif batch_size != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' ' the batch size of `prompt`.' ) else: _UpperCAmelCase = negative_prompt _UpperCAmelCase = text_input_ids.shape[-1] _UpperCAmelCase = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) _UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _UpperCAmelCase = uncond_embeddings.shape[1] _UpperCAmelCase = uncond_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 ) _UpperCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. _UpperCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) _UpperCAmelCase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps _UpperCAmelCase = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device='cpu' , dtype=_SCREAMING_SNAKE_CASE ).to( self.device ) else: _UpperCAmelCase = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) _UpperCAmelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand _UpperCAmelCase = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase = {} if accepts_eta: _UpperCAmelCase = eta for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # predict the noise residual _UpperCAmelCase = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE ).sample # perform guidance if do_classifier_free_guidance: _UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 ) _UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 1 / 0.18215 * latents _UpperCAmelCase = self.vae.decode(_SCREAMING_SNAKE_CASE ).sample _UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) if not return_dict: return image return StableDiffusionPipelineOutput(images=_SCREAMING_SNAKE_CASE , 
nsfw_content_detected=_SCREAMING_SNAKE_CASE )
code_codestyle: 329
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
style_context_codestyle: 329
label: 1
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowerCAmelCase__ :str = pytest.mark.integration lowerCAmelCase__ :str = {'''comet'''} lowerCAmelCase__ :str = importlib.util.find_spec('''fairseq''') is not None lowerCAmelCase__ :Optional[int] = {'''code_eval'''} lowerCAmelCase__ :Tuple = os.name == '''nt''' lowerCAmelCase__ :Any = {'''bertscore''', '''frugalscore''', '''perplexity'''} lowerCAmelCase__ :Optional[int] = importlib.util.find_spec('''transformers''') is not None def lowerCAmelCase__ ( a__: str ) -> str: '''simple docstring''' @wraps(a__ ) def wrapper(self: Any , a__: List[str] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , a__ ) return wrapper def lowerCAmelCase__ ( a__: List[str] ) -> Tuple: '''simple docstring''' @wraps(a__ ) def wrapper(self: Tuple , a__: Optional[int] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , a__ ) return wrapper def lowerCAmelCase__ ( a__: Optional[int] ) -> Any: '''simple docstring''' @wraps(a__ ) def wrapper(self: List[str] , a__: List[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , a__ ) return wrapper def lowerCAmelCase__ ( ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @local class __a ( parameterized.TestCase ): _a : List[Any] = {} _a : Optional[int] = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _UpperCAmelCase = '[...]' _UpperCAmelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , _SCREAMING_SNAKE_CASE ) ).module_path ) _UpperCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=_SCREAMING_SNAKE_CASE ) # check parameters _UpperCAmelCase = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(_SCREAMING_SNAKE_CASE , metric_module.__name__ ): with self.use_local_metrics(): try: _UpperCAmelCase = doctest.testmod(_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , raise_on_error=_SCREAMING_SNAKE_CASE ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = '[...]' _UpperCAmelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , _SCREAMING_SNAKE_CASE ) ).module_path ) 
# run doctest with self.use_local_metrics(): _UpperCAmelCase = doctest.testmod(_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , raise_on_error=_SCREAMING_SNAKE_CASE ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](_SCREAMING_SNAKE_CASE ): yield else: yield @contextmanager def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" def load_local_metric(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return load_metric(os.path.join('metrics' , _SCREAMING_SNAKE_CASE ) , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) with patch('datasets.load_metric' ) as mock_load_metric: _UpperCAmelCase = load_local_metric yield @classmethod def UpperCAmelCase__ ( cls , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" def wrapper(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = contextmanager(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCAmelCase__ ( a__: Optional[int] ) -> Union[str, Any]: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __a ( UpperCAmelCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: _UpperCAmelCase = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' import torch def bert_cos_score_idf(a__: Tuple , a__: Dict , *a__: Optional[Any] , **a__: str ): return torch.tensor([[1.0, 1.0, 1.0]] * len(a__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: _UpperCAmelCase = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCAmelCase__ ( a__: Optional[int] ) -> int: '''simple docstring''' def load_from_checkpoint(a__: Dict ): class __a : def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" assert len(_SCREAMING_SNAKE_CASE ) == 2 _UpperCAmelCase = [0.19, 0.92] return scores, sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: _UpperCAmelCase = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: _UpperCAmelCase = load_from_checkpoint yield def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = load_metric(os.path.join('metrics' , 'seqeval' ) ) _UpperCAmelCase = 'ERROR' _UpperCAmelCase = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(a__ , 
match=re.escape(a__ ) ): metric.compute(predictions=[] , references=[] , scheme=a__ )
code_codestyle: 329
import argparse
import logging
import os

import datasets
import tensorflow as tf
from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
        print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
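The `group_texts` chunking in the script above is easiest to see on toy data; here is a self-contained sketch of the same logic, where `max_length = 4` is an illustrative stand-in for `args.max_length`:

# Mirrors group_texts: concatenate, drop the remainder, split into fixed chunks.
examples = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8], [9]]}
max_length = 4

concatenated = {k: sum(examples[k], []) for k in examples}                   # [1, 2, ..., 9]
total_length = (len(concatenated["input_ids"]) // max_length) * max_length  # 9 -> 8
chunks = {
    k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
    for k, t in concatenated.items()
}
assert chunks["input_ids"] == [[1, 2, 3, 4], [5, 6, 7, 8]]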
style_context_codestyle: 329
label: 1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE=0.01 , _SCREAMING_SNAKE_CASE=1000 ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = p_stop _UpperCAmelCase = max_length def __iter__( self ) -> str: """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = False while not stop and count < self.max_length: yield count count += 1 _UpperCAmelCase = random.random() < self.p_stop class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ) -> int: """simple docstring""" _UpperCAmelCase = [ BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) for i in range(2 ) ] _UpperCAmelCase = [list(_SCREAMING_SNAKE_CASE ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(_SCREAMING_SNAKE_CASE ) for shard in batch_sampler_shards] , [len(_SCREAMING_SNAKE_CASE ) for e in expected] ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
_UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. 
_UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. 
_UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[[0, 1]], []] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. 
_UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[[0, 1]], []] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] _UpperCAmelCase = [BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False ) -> List[Any]: """simple docstring""" random.seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = list(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ IterableDatasetShard( _SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , drop_last=_SCREAMING_SNAKE_CASE , num_processes=_SCREAMING_SNAKE_CASE , process_index=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , ) for i in range(_SCREAMING_SNAKE_CASE ) ] _UpperCAmelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(_SCREAMING_SNAKE_CASE ) iterable_dataset_lists.append(list(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _UpperCAmelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(len(_SCREAMING_SNAKE_CASE ) % shard_batch_size == 0 ) _UpperCAmelCase = [] for idx in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(_SCREAMING_SNAKE_CASE ) < len(_SCREAMING_SNAKE_CASE ): reference += reference self.assertListEqual(_SCREAMING_SNAKE_CASE , reference[: len(_SCREAMING_SNAKE_CASE )] ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = 42 _UpperCAmelCase = RandomIterableDataset() self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) # Edge case with a very small dataset _UpperCAmelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = SkipBatchSampler(_SCREAMING_SNAKE_CASE , 2 ) self.assertListEqual(list(_SCREAMING_SNAKE_CASE ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) _UpperCAmelCase = skip_first_batches(_SCREAMING_SNAKE_CASE , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it 
also works on the second iteration for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" Accelerator() _UpperCAmelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
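# A minimal standalone check (a sketch, reusing the imports of the test file
# above) of the wrap-around behaviour these tests encode: with even_batches,
# a process whose last batch falls short borrows indices from the start of the
# dataset so that every process sees the same number of full batches.
from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
# Process 1 completes its final batch with indices 0 and 1, matching the
# expected value asserted in the tests above:
assert list(shards[1])[-1] == [21, 0, 1]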
329
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
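# A minimal sketch of the scheduler behaviour the table above encodes
# (assumes only torch plus the transformers imports at the top of this file):
# the learning rate ramps from 0 to lr over num_warmup_steps, then decays
# linearly back toward 0 over the remaining training steps.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
lrs = [scheduler.get_lr()[0]]
for _ in range(9):
    scheduler.step()
    lrs.append(scheduler.get_lr()[0])
# lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
# i.e. the expected values listed for get_linear_schedule_with_warmup above.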
329
1
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for the given number of bits, as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
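# Usage sketch (not in the original module): each adjacent pair in the output
# differs in exactly one bit — the defining Gray-code property.
codes = gray_code(3)
assert codes == [0, 1, 3, 2, 6, 7, 5, 4]
assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))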
329
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
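# Hedged worked example (not part of the conversion script; assumes
# replace_key_with_offset is in scope, i.e. run inside this script): with
# offset=1, the block index embedded in a checkpoint key is shifted down and
# the layer is renamed into the HuggingFace naming scheme.
example_key = "poolformer.encoder.2.3.mlp.fc1.weight"
renamed = replace_key_with_offset(example_key, 1, "mlp.fc1", "output.conv1")
assert renamed == "poolformer.encoder.block.1.3.output.conv1.weight"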
329
1
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase__ :str = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __a : _a : List[Any] = PegasusConfig _a : List[str] = {} _a : Any = 'gelu' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , ) -> str: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_pegasus_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return config, inputs_dict def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model.encode(inputs_dict['input_ids'] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = 20 _UpperCAmelCase = model_class_name(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model.encode(inputs_dict['input_ids'] ) _UpperCAmelCase , _UpperCAmelCase = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCAmelCase__ ( a__: Optional[Any] , a__: Tuple , a__: Any , a__: Tuple=None , a__: Dict=None , ) -> Any: '''simple docstring''' if attention_mask is None: _UpperCAmelCase = np.not_equal(a__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __a ( UpperCAmelCase , unittest.TestCase ): _a : Any = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _a : 
List[str] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () _a : Optional[int] = True _a : Union[str, Any] = False _a : Tuple = False _a : Union[str, Any] = False def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = FlaxPegasusModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) @jax.jit def encode_jitted(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): return model.encode(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) with self.subTest('JIT Enabled' ): _UpperCAmelCase = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _UpperCAmelCase = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) _UpperCAmelCase = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return model.decode( decoder_input_ids=_SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , encoder_outputs=_SCREAMING_SNAKE_CASE , ) with self.subTest('JIT Enabled' ): _UpperCAmelCase = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _UpperCAmelCase = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: 
_UpperCAmelCase = model_class_name.from_pretrained('google/pegasus-large' , from_pt=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.ones((1, 1) ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) _UpperCAmelCase = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) _UpperCAmelCase = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] _UpperCAmelCase = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='np' , truncation=_SCREAMING_SNAKE_CASE , max_length=512 , padding=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model.generate(**_SCREAMING_SNAKE_CASE , num_beams=2 ).sequences _UpperCAmelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) assert tgt_text == decoded
329
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
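# Hedged usage sketch: in the transformers source these classes are
# PipelineDataset / PipelineIterator / PipelineChunkIterator / PipelinePackIterator
# and KeyDataset / KeyPairDataset (the class names above were mangled to `__a`).
# A minimal demonstration of batch unrolling, assuming that public import path:
import torch
from torch.utils.data import DataLoader
from transformers.pipelines.pt_utils import PipelineIterator

loader = DataLoader(list(range(8)), batch_size=4)
it = PipelineIterator(loader, lambda batch: batch * 2, {}, loader_batch_size=4)
# Each batch of 4 is inferred at once, then unrolled into single items:
assert [int(x) for x in it] == [0, 2, 4, 6, 8, 10, 12, 14]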
329
1
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each vector in value_array, find its nearest neighbour in dataset.

    Returns a list of [nearest_vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
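# Usage sketch (not part of the original module): nearest neighbour under the
# Euclidean metric, plus cosine similarity of two parallel vectors.
dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.2, 0.1]])
print(similarity_search(dataset, queries))  # [[[0.0, 0.0], 0.22360679...]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # 1.0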
329
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
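# Usage sketch (assumes the public transformers package, where this class is
# exported as Data2VecTextConfig): defaults follow the base model, and any
# field can be overridden at construction time.
from transformers import Data2VecTextConfig

config = Data2VecTextConfig(num_hidden_layers=6)
assert config.model_type == "data2vec-text"
assert config.hidden_size == 768 and config.num_hidden_layers == 6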
329
1
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor lowerCAmelCase__ :Optional[Any] = random.Random() def lowerCAmelCase__ ( a__: str , a__: List[str]=1.0 , a__: Dict=None , a__: Optional[int]=None ) -> List[Any]: '''simple docstring''' if rng is None: _UpperCAmelCase = global_rng _UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=2000 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , ) -> str: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = min_seq_length _UpperCAmelCase = max_seq_length _UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase = feature_size _UpperCAmelCase = num_mel_bins _UpperCAmelCase = padding_value _UpperCAmelCase = sampling_rate _UpperCAmelCase = return_attention_mask _UpperCAmelCase = do_normalize def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" def _flatten(_SCREAMING_SNAKE_CASE ): return list(itertools.chain(*_SCREAMING_SNAKE_CASE ) ) if equal_length: _UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase = [np.asarray(_SCREAMING_SNAKE_CASE ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = SpeechaTextFeatureExtractor if is_speech_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = SpeechaTextFeatureExtractionTester(self ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" self.assertTrue(np.all(np.mean(_SCREAMING_SNAKE_CASE , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(_SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1e-3 ) ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs] # Test feature size _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , 
return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input _UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features _UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test batched _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. _UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase = np.asarray(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase = ['longest', 'max_length', 'do_not_pad'] _UpperCAmelCase = [None, 16, None] for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = feature_extractor( _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inputs.input_features _UpperCAmelCase = inputs.attention_mask _UpperCAmelCase = [np.sum(_SCREAMING_SNAKE_CASE ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase = ['longest', 'max_length', 'do_not_pad'] _UpperCAmelCase = [None, 16, None] for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = feature_extractor( _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inputs.input_features _UpperCAmelCase = inputs.attention_mask _UpperCAmelCase = [np.sum(_SCREAMING_SNAKE_CASE ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCAmelCase__ ( self ) -> Any: 
"""simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase = feature_extractor( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = inputs.input_features _UpperCAmelCase = inputs.attention_mask _UpperCAmelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase = feature_extractor( _SCREAMING_SNAKE_CASE , padding='longest' , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = inputs.input_features _UpperCAmelCase = inputs.attention_mask _UpperCAmelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase = feature_extractor( _SCREAMING_SNAKE_CASE , padding='longest' , max_length=16 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = inputs.input_features _UpperCAmelCase = inputs.attention_mask _UpperCAmelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" import torch _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa ) _UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _UpperCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" from datasets import load_dataset _UpperCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech _UpperCAmelCase = ds.sort('id' ).select(range(_SCREAMING_SNAKE_CASE ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self ) -> Tuple: """simple 
docstring""" _UpperCAmelCase = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on _UpperCAmelCase = self._load_datasamples(1 ) _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
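# Hedged sketch of the backbone under test, using the same import path as the
# test file itself; the tiny sizes mirror the tester defaults above.
import torch
from transformers import MaskFormerSwinConfig
from transformers.models.maskformer import MaskFormerSwinBackbone

config = MaskFormerSwinConfig(
    image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1],
    num_heads=[2, 2, 4], window_size=2, out_features=["stage1", "stage2", "stage3"],
)
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 32, 32))
print([tuple(fm.shape) for fm in outputs.feature_maps])  # one map per out_feature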
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # if neither endpoint is a root and both values share a sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval shrinks below 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
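# Quick check of the failure mode handled above: both endpoints give a
# positive f, so bisection() cannot bracket a root and raises ValueError.
try:
    bisection(f, 3, 1000)  # f(3) = 16 > 0 and f(1000) > 0
except ValueError as err:
    print(err)  # could not find root in given interval.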
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
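# Quick sanity check of solution() above: 144 = F(12) is the first Fibonacci
# number with three digits (Project Euler 25 convention).
assert solution(3) == 12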
class SubArray:
    def __init__(self, arr: str) -> None:
        # convert a comma-separated string into a list of number strings
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        """Maximum contiguous subarray sum (Kadane's algorithm)."""
        # sum_value[i]: best sum of a subarray ending at index i
        # rear[i]: best sum seen anywhere up to index i
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
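# Worked example for the SubArray class above: the maximum-sum contiguous
# subarray of [1, -2, 3, 4, -1] is [3, 4], with sum 7.
print(SubArray("1,-2,3,4,-1").solve_sub_array())  # 7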
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 30} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
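# Hedged sketch of the processor under test, reusing the tester's sizes; the
# random uint8 array stands in for a real image.
import numpy as np
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
image = (np.random.rand(3, 40, 40) * 255).astype(np.uint8)  # channels-first dummy image
print(processor(image, return_tensors="np").pixel_values.shape)  # (1, 3, 30, 30)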
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative ints as a '0b...' string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
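# Worked example for binary_xor above: 25 = 0b11001 and 32 = 0b100000,
# so their XOR is 0b111001 (57).
print(binary_xor(25, 32))                      # 0b111001
print(int(binary_xor(25, 32), 2) == 25 ^ 32)   # True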
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
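# Hedged sketch of the Donut processor under test with the tester's size; with
# the default thumbnail + pad behaviour the output is padded to (height, width).
import numpy as np
from transformers import DonutImageProcessor

processor = DonutImageProcessor(size={"height": 18, "width": 20})
image = (np.random.rand(3, 40, 40) * 255).astype(np.uint8)
print(processor(image, return_tensors="np").pixel_values.shape)  # (1, 3, 18, 20)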
def solution(limit: int = 50_000_000) -> int:
    """
    Count numbers below `limit` expressible as p**2 + q**3 + r**4 with
    p, q, r prime (Project Euler problem 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes up to sqrt(limit - 24)
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
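# Small-scale check of solution() above: below 50 there are exactly four such
# numbers: 28 (= 2**2 + 2**3 + 2**4), 33, 47 and 49.
print(solution(50))  # 4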
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
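# Hedged usage sketch for the config defined above: attribute_map lets the
# generic names resolve to the GPT-specific attributes.
config = OpenAIGPTConfig()
print(config.hidden_size == config.n_embd)                       # True
print(config.max_position_embeddings, config.num_hidden_layers)  # 512 12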
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
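# Hedged sketch of the replicate/shard/jit pattern the tests above exercise.
# Running it downloads multi-gigabyte weights; the prompt is illustrative.
import jax
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="flax"
)
prompts = jax.device_count() * ["a photo of an astronaut riding a horse"]
prompt_ids = shard(pipeline.prepare_inputs(prompts))
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
images = pipeline(prompt_ids, replicate(params), rng, jit=True).images
print(images.shape)  # (num_devices, 1, 512, 512, 3)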
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
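# Worked example of the URL the test above expects: quote() percent-encodes
# the blanks in the filename.
from urllib.parse import quote

print(quote("filename with blanks.csv"))  # filename%20with%20blanks.csv
# -> https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv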
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
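# Hedged sketch: instantiate the config above and check the derived feature
# size (input_size * len(lags_sequence) + _number_of_features).
config = TimeSeriesTransformerConfig(prediction_length=24)
print(config.context_length)  # 24 (defaults to prediction_length)
print(config.feature_size)    # 1 * 7 + (0 + 0 + 0 + 0 + 2) = 9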
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
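# A minimal sketch of how `dep_version_check` is meant to be called elsewhere:
# fail fast with a clear hint when an optional dependency is too old. The
# package name must exist as a key in `deps`.
dep_version_check("numpy", "numpy is needed for array manipulation")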
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other (case- and whitespace-insensitive)."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
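# A quick demonstration of check_anagrams; an equivalent formulation uses
# collections.Counter, which compares the same per-character tallies directly.
from collections import Counter

assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("This is a string", "Is this a string") is True
assert check_anagrams("There", "Their") is False
assert (Counter("silent") == Counter("listen")) == check_anagrams("silent", "listen")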
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth first search; returns the set of vertices explored from `start`."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
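# Expected behaviour on the sample graph above: starting from "A", every vertex
# is reachable, so the whole vertex set comes back.
assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}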
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # The values below are deliberately hard-coded and override the arguments.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs_list)
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
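# A hedged inference sketch for the checkpoint exercised by the integration test
# above; it assumes TensorFlow is installed and the Hugging Face Hub is reachable.
from transformers import AutoTokenizer, TFConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("ConvBERT replaces some attention heads with convolutions.", return_tensors="tf")
outputs = model(inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)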
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class FlaxPipelineDownloadTests(unittest.TestCase):
    def test_only_flax_weights_are_downloaded(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_tiny_pipe(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the
        # `sum` over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
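# A minimal, self-contained sketch of the replicate/shard pattern the tests above
# rely on (assumes `jax` and `flax` are installed; the shapes are illustrative only).
# `shard` splits the leading batch axis across devices, `replicate` copies the
# parameters to every device, and `jax.pmap` runs the function once per device.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()
batch = jnp.ones((num_devices * 2, 77), dtype=jnp.float32)  # two examples per device
params = {"w": jnp.ones((77, 8))}

sharded_batch = shard(batch)           # (num_devices, 2, 77)
replicated_params = replicate(params)  # leading device axis added to every leaf

@jax.pmap
def apply(params, x):
    return x @ params["w"]

out = apply(replicated_params, sharded_batch)  # (num_devices, 2, 8)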
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
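# The `params=locals()` trick above sends the function's arguments verbatim as
# query parameters. The explicit equivalent for `current_weather` would be:
response = requests.get(URL_BASE + "weather", params={"q": "Chicago", "appid": APPID})
print(response.json())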
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt

        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
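# A typical way to exercise this pipeline through the high-level `pipeline`
# factory; the checkpoint name is just an example, any image-to-text model works.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# e.g. [{'generated_text': 'two birds are standing next to each other '}]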
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        embed_dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
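# A standalone version of the integration check above, hedged: it assumes
# TensorFlow, Pillow, requests, and Hub access; any RGB image can stand in for
# the COCO fixture.
import requests
from PIL import Image

from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.numpy().argmax(-1))])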
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
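# A usage sketch for `deprecate`, mirroring how it is called elsewhere in the
# library: pull a deprecated kwarg out of **kwargs, warn, and fall back to the
# new argument. The function and argument names here are hypothetical.
def resize(image, scale=None, **kwargs):
    # returns the popped value if the caller still used the old name, else None
    size = deprecate("size", "1.0.0", "Pass `scale` instead of `size`.", take_from=kwargs)
    if size is not None:
        scale = size
    return image, scale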
# NOTE: this implementation targets the TensorFlow 1.x API
# (under TF 2, use `import tensorflow.compat.v1 as tf; tf.disable_v2_behavior()`).
from random import shuffle

import tensorflow as tf
from numpy import array


def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]

        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]

        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
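# A minimal usage sketch for TFKMeansCluster above. It assumes a TensorFlow 1.x
# runtime (or the compat.v1 shim noted at the top of the file); the data is
# illustrative: two well-separated 2-D blobs.
import numpy as np

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 0.5, (20, 2)), rng.normal(5, 0.5, (20, 2))])

centroids, assignments = TFKMeansCluster(data, 2)
print(centroids)    # two centroid locations, near (0, 0) and (5, 5)
print(assignments)  # cluster index (0 or 1) for each input vector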
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours among `taken` balls."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
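# Sanity check of the closed form above via linearity of expectation:
# E[#colours] = 7 * P(a given colour appears) = 7 * (1 - C(60, 20) / C(70, 20)).
expected = 7 * (1 - math.comb(60, 20) / math.comb(70, 20))
assert f"{expected:.9f}" == solution(20)  # 6.818741802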
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the x in [0, n1*n2) with x = r1 (mod n1) and x = r2 (mod n2)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
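# A small worked example of the functions above: find x with
# x = 1 (mod 5) and x = 3 (mod 7); x = 31 satisfies both congruences.
assert 31 % 5 == 1 and 31 % 7 == 3
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31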
329
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
1
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ :Any = logging.get_logger(__name__) lowerCAmelCase__ :List[str] = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCAmelCase__ :Tuple = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } lowerCAmelCase__ :Optional[Any] = {'''facebook/blenderbot_small-90M''': 5_1_2} def lowerCAmelCase__ ( a__: Optional[int] ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCAmelCase = char _UpperCAmelCase = set(a__ ) return pairs class __a ( UpperCAmelCase ): _a : Any = VOCAB_FILES_NAMES _a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="__start__" , _SCREAMING_SNAKE_CASE="__end__" , _SCREAMING_SNAKE_CASE="__unk__" , _SCREAMING_SNAKE_CASE="__null__" , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" super().__init__(unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = {v: k for k, v in self.encoder.items()} with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle: _UpperCAmelCase = merges_handle.read().split('\n' )[1:-1] _UpperCAmelCase = [tuple(merge.split() ) for merge in merges] _UpperCAmelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) ) _UpperCAmelCase = {} @property def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return len(self.encoder ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] _UpperCAmelCase = re.sub('([.,!?()])' , R' \1' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = re.sub('(\')' , R' \1 ' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = re.sub(R'\s{2,}' , ' ' , _SCREAMING_SNAKE_CASE ) if "\n" in token: _UpperCAmelCase = token.replace('\n' , ' __newln__' ) _UpperCAmelCase = token.split(' ' ) _UpperCAmelCase = [] for token in tokens: if not len(_SCREAMING_SNAKE_CASE ): continue _UpperCAmelCase = token.lower() _UpperCAmelCase = tuple(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) _UpperCAmelCase = get_pairs(_SCREAMING_SNAKE_CASE ) if not pairs: words.append(_SCREAMING_SNAKE_CASE ) continue while True: _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) ) if bigram not in 
self.bpe_ranks: break _UpperCAmelCase , _UpperCAmelCase = bigram _UpperCAmelCase = [] _UpperCAmelCase = 0 while i < len(_SCREAMING_SNAKE_CASE ): try: _UpperCAmelCase = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) new_word.extend(word[i:j] ) _UpperCAmelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _UpperCAmelCase = tuple(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = new_word if len(_SCREAMING_SNAKE_CASE ) == 1: break else: _UpperCAmelCase = get_pairs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '@@ '.join(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = word[:-4] _UpperCAmelCase = word words.append(_SCREAMING_SNAKE_CASE ) return " ".join(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = re.findall(R'\S+\n?' , _SCREAMING_SNAKE_CASE ) for token in words: split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) ) ) return split_tokens def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _UpperCAmelCase = token.lower() return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = ' '.join(_SCREAMING_SNAKE_CASE ).replace('@@ ' , '' ).strip() return out_string def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + '\n' ) _UpperCAmelCase = 0 with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _UpperCAmelCase = token_index writer.write(' '.join(_SCREAMING_SNAKE_CASE ) + '\n' ) index += 1 return vocab_file, merge_file
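Judging by the URL maps at the top of the file, the class above is the BlenderbotSmall tokenizer from transformers; a minimal usage sketch (the exact token ids and decoded output are indicative only):

from transformers import BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
ids = tokenizer("sam is a great name")["input_ids"]
print(tokenizer.decode(ids))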
329
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was sucessfully converted!' 
)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # argparse stores `--t5x_checkpoint_path` as `args.t5x_checkpoint_path`.
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
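An illustrative invocation of the script above; the checkpoint path and dump folder are placeholders, and the config name is only an example:

# Example command line (hypothetical paths):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/flax_dump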
329
1
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
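A sketch of a concrete subclass showing how the two abstract hooks fit together; the command itself is made up:

class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` here is the subparsers action created by the CLI entry point.
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"Hello, {self.name}!")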
329
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
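For reference, a minimal instantiation sketch for the config above; the values in the smaller variant are arbitrary:

config = CTRLConfig()  # the published `ctrl` defaults: 48 layers, d_model 1280
tiny_config = CTRLConfig(n_layer=2, n_head=4, n_embd=128, dff=512)
print(config.n_positions, tiny_config.num_hidden_layers)  # 256 2 (via attribute_map)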
329
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ :List[str] = { '''configuration_efficientformer''': [ '''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientFormerConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ :Dict = ['''EfficientFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ :List[Any] = [ '''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientFormerForImageClassification''', '''EfficientFormerForImageClassificationWithTeacher''', '''EfficientFormerModel''', '''EfficientFormerPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ :str = [ '''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFEfficientFormerForImageClassification''', '''TFEfficientFormerForImageClassificationWithTeacher''', '''TFEfficientFormerModel''', '''TFEfficientFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
329
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
329
1
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Project Euler 174: count the tile totals t <= t_limit that can form
    between 1 and n_limit distinct hollow square laminae.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Keep the hole the same parity as the outer square.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
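A quick sanity check on the smallest case (an illustrative addition): with t_limit = 8 the only lamina is a 3x3 square with a 1x1 hole, using 9 - 1 = 8 tiles, so exactly one tile total qualifies:

assert solution(8) == 1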
329
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
329
1
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=None , ) -> int: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = decoder_seq_length # For common tests _UpperCAmelCase = self.decoder_seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = d_model _UpperCAmelCase = d_model _UpperCAmelCase = decoder_layers _UpperCAmelCase = decoder_layers _UpperCAmelCase = decoder_ffn_dim _UpperCAmelCase = decoder_attention_heads _UpperCAmelCase = decoder_attention_heads _UpperCAmelCase = eos_token_id _UpperCAmelCase = bos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = decoder_start_token_id _UpperCAmelCase = use_cache _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = None _UpperCAmelCase = decoder_seq_length _UpperCAmelCase = 2 _UpperCAmelCase = 1 def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _UpperCAmelCase = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = True _UpperCAmelCase = TrOCRDecoder(config=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE ) self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) ) 
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) + 1 ) _UpperCAmelCase = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )['last_hidden_state'] _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )['last_hidden_state'] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () _a : List[str] = (TrOCRForCausalLM,) if is_torch_available() else () _a : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} _a : Optional[Any] = True _a : List[Any] = False def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = TrOCRStandaloneDecoderModelTester(self , is_training=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass
329
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__) def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' , ) _UpperCAmelCase = parser.parse_args() return args def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' def fn(a__: str ): return tokenizer(examples['text'] ) return fn def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for i in range(len(tokenized_data['input_ids'] ) ): _UpperCAmelCase = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _UpperCAmelCase = tf.train.Features(feature=a__ ) _UpperCAmelCase = tf.train.Example(features=a__ ) _UpperCAmelCase = example.SerializeToString() records.append(a__ ) return records def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int: '''simple docstring''' _UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _UpperCAmelCase = min(len(a__ ) , args.limit ) _UpperCAmelCase = dataset.select(range(a__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(a__ ): os.makedirs(a__ ) else: _UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_UpperCAmelCase = tokenize_function(a__ ) _UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(a__: Optional[int] ): # Concatenate all texts. _UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )] for k, t in concatenated_examples.items() } return result _UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 for shard in range(0 , len(a__ ) , args.shard_size ): _UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] _UpperCAmelCase = len(dataset_snapshot['input_ids'] ) _UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _UpperCAmelCase = get_serialized_examples(a__ ) with tf.io.TFRecordWriter(a__ ) as out_file: for i in range(len(a__ ) ): _UpperCAmelCase = serialized_examples[i] out_file.write(a__ ) print('Wrote file {} containing {} records'.format(a__ , a__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f: print(F'''Total {args.split} records: {total_records}''' , file=a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = parse_args() main(args)
329
1
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal equivalents of different numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
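A quick cross-check against Python's built-in formatting (an illustrative addition):

# decimal_to_octal should agree with the built-in oct() for positive integers.
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n)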
329
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
329
1
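For reference, a minimal sketch of how the optimizer and schedule under test are typically combined in user code (hypothetical toy model, learning rate, and step counts; not part of the test file above):

import torch
from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup

model = nn.Linear(10, 2)  # hypothetical toy model
optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

for step in range(10):
    loss = model(torch.randn(4, 10)).pow(2).mean()  # dummy loss on random input
    loss.backward()
    optimizer.step()
    scheduler.step()  # advance the LR schedule once per optimizer step
    optimizer.zero_grad()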
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' if is_torch_version('<' , '2.0.0' ) or not hasattr(a__ , '_dynamo' ): return False return isinstance(a__ , torch._dynamo.eval_frame.OptimizedModule ) def lowerCAmelCase__ ( a__: Tuple , a__: bool = True ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) _UpperCAmelCase = is_compiled_module(a__ ) if is_compiled: _UpperCAmelCase = model _UpperCAmelCase = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(a__ , a__ ): _UpperCAmelCase = model.module if not keep_fpaa_wrapper: _UpperCAmelCase = getattr(a__ , 'forward' ) _UpperCAmelCase = model.__dict__.pop('_original_forward' , a__ ) if original_forward is not None: while hasattr(a__ , '__wrapped__' ): _UpperCAmelCase = forward.__wrapped__ if forward == original_forward: break _UpperCAmelCase = forward if getattr(a__ , '_converted_to_transformer_engine' , a__ ): convert_model(a__ , to_transformer_engine=a__ ) if is_compiled: _UpperCAmelCase = model _UpperCAmelCase = compiled_model return model def lowerCAmelCase__ ( ) -> str: '''simple docstring''' PartialState().wait_for_everyone() def lowerCAmelCase__ ( a__: Any , a__: Optional[Any] ) -> Tuple: '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(a__ , a__ ) elif PartialState().local_process_index == 0: torch.save(a__ , a__ ) @contextmanager def lowerCAmelCase__ ( **a__: Union[str, Any] ) -> List[str]: '''simple docstring''' for key, value in kwargs.items(): _UpperCAmelCase = str(a__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCAmelCase__ ( a__: str ) -> Tuple: '''simple docstring''' if not hasattr(a__ , '__qualname__' ) and not hasattr(a__ , '__name__' ): _UpperCAmelCase = getattr(a__ , '__class__' , a__ ) if hasattr(a__ , '__qualname__' ): return obj.__qualname__ if hasattr(a__ , '__name__' ): return obj.__name__ return str(a__ ) def lowerCAmelCase__ ( a__: Optional[Any] , a__: Union[str, Any] ) -> Optional[int]: '''simple docstring''' for key, value in source.items(): if isinstance(a__ , a__ ): _UpperCAmelCase = destination.setdefault(a__ , {} ) merge_dicts(a__ , a__ ) else: _UpperCAmelCase = value return destination def lowerCAmelCase__ ( a__: int = None ) -> bool: '''simple docstring''' if port is None: _UpperCAmelCase = 2_9_5_0_0 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
329
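The final helper above probes whether anything is already listening on a port, defaulting to 29500 (the usual torch.distributed rendezvous port). A standalone sketch of the same check using only the standard library:

import socket

def port_in_use(port: int = 29500) -> bool:
    # connect_ex returns 0 when the connection succeeds, i.e. the port is taken
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0

print(port_in_use())  # False when nothing is bound to 29500 locally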
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
329
1
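The converter's renaming step is plain string substitution over the checkpoint's OrderedDict keys. A toy sketch of the same idea, with a hypothetical two-entry state dict standing in for the real PoolFormer checkpoint:

from collections import OrderedDict

import torch

state_dict = OrderedDict(
    {
        "network.0.0.mlp.fc1.weight": torch.zeros(4, 4),
        "network.0.1.mlp.fc2.weight": torch.zeros(4, 4),
    }
)

renamed = OrderedDict(
    (
        key.replace("network", "poolformer.encoder")
        .replace("mlp.fc1", "output.conv1")
        .replace("mlp.fc2", "output.conv2"),
        value,
    )
    for key, value in state_dict.items()
)
print(list(renamed))  # ['poolformer.encoder.0.0.output.conv1.weight', ...]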
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase__ ( a__: Any , a__: int ) -> Dict: '''simple docstring''' assert isinstance(a__ , a__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCAmelCase__ ( a__: int , a__: str , a__: List[Any] ) -> Dict: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _UpperCAmelCase = ParquetDatasetReader(a__ , cache_dir=a__ , keep_in_memory=a__ ).read() _check_parquet_dataset(a__ , a__ ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCAmelCase__ ( a__: str , a__: Optional[int] , a__: List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _UpperCAmelCase = features.copy() if features else default_expected_features _UpperCAmelCase = ( Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None ) _UpperCAmelCase = ParquetDatasetReader(a__ , features=a__ , cache_dir=a__ ).read() _check_parquet_dataset(a__ , a__ ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCAmelCase__ ( a__: Dict , a__: Optional[int] , a__: Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _UpperCAmelCase = ParquetDatasetReader(a__ , cache_dir=a__ , split=a__ ).read() _check_parquet_dataset(a__ , a__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowerCAmelCase__ ( a__: Optional[Any] , a__: int , a__: int ) -> Tuple: '''simple docstring''' if issubclass(a__ , a__ ): _UpperCAmelCase = parquet_path elif issubclass(a__ , a__ ): _UpperCAmelCase = [parquet_path] _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _UpperCAmelCase = ParquetDatasetReader(a__ , cache_dir=a__ ).read() _check_parquet_dataset(a__ , a__ ) def lowerCAmelCase__ ( a__: int , a__: str , a__: int=("train",) ) -> Union[str, Any]: '''simple docstring''' assert isinstance(a__ , a__ ) for split in splits: _UpperCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCAmelCase__ ( a__: str , a__: Tuple , a__: 
Optional[Any] ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _UpperCAmelCase = ParquetDatasetReader( {'train': parquet_path} , cache_dir=a__ , keep_in_memory=a__ ).read() _check_parquet_datasetdict(a__ , a__ ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCAmelCase__ ( a__: str , a__: int , a__: List[Any] ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _UpperCAmelCase = features.copy() if features else default_expected_features _UpperCAmelCase = ( Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None ) _UpperCAmelCase = ParquetDatasetReader({'train': parquet_path} , features=a__ , cache_dir=a__ ).read() _check_parquet_datasetdict(a__ , a__ ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCAmelCase__ ( a__: Dict , a__: Optional[int] , a__: Optional[Any] ) -> str: '''simple docstring''' if split: _UpperCAmelCase = {split: parquet_path} else: _UpperCAmelCase = 'train' _UpperCAmelCase = {'train': parquet_path, 'test': parquet_path} _UpperCAmelCase = tmp_path / 'cache' _UpperCAmelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _UpperCAmelCase = ParquetDatasetReader(a__ , cache_dir=a__ ).read() _check_parquet_datasetdict(a__ , a__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase__ ( a__: int , a__: Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = ParquetDatasetWriter(a__ , tmp_path / 'foo.parquet' ) assert writer.write() > 0 _UpperCAmelCase = pq.ParquetFile(tmp_path / 'foo.parquet' ) _UpperCAmelCase = pf.read() assert dataset.data.table == output_table def lowerCAmelCase__ ( a__: Any , a__: Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = str(shared_datadir / 'test_image_rgb.jpg' ) _UpperCAmelCase = {'image': [image_path]} _UpperCAmelCase = Features({'image': Image()} ) _UpperCAmelCase = Dataset.from_dict(a__ , features=a__ ) _UpperCAmelCase = ParquetDatasetWriter(a__ , tmp_path / 'foo.parquet' ) assert writer.write() > 0 _UpperCAmelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) ) assert dataset.features == reloaded_dataset.features _UpperCAmelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=a__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( 'feature, expected' , [ (Features({'foo': Value('int32' )} ), None), (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase__ ( a__: Union[str, Any] , a__: str ) -> Optional[Any]: '''simple docstring''' assert get_writer_batch_size(a__ ) == expected
329
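A minimal round trip through the reader and writer classes exercised above (hypothetical local path; write() returns a positive number on success, as the tests assert):

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict(
    {"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
)
assert ParquetDatasetWriter(ds, "data.parquet").write() > 0
reloaded = ParquetDatasetReader("data.parquet").read()
assert reloaded.num_rows == 4
assert reloaded.column_names == ["col_1", "col_2", "col_3"]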
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start looking at the next item # ChunkIterator will keep feeding until ALL elements of the iterator # have created their subiterator and have been iterated against.
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
329
1
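KeyDataset at the end of the module is the piece most often seen in user code: it wraps a datasets.Dataset so that indexing yields a single column, ready to stream into a pipeline. A small sketch (the sentiment pipeline in the comment is hypothetical):

from datasets import Dataset
from transformers.pipelines.pt_utils import KeyDataset

ds = Dataset.from_dict({"text": ["I love this.", "I hate this."], "label": [1, 0]})
wrapped = KeyDataset(ds, "text")
print(len(wrapped), wrapped[0])  # 2 "I love this."
# typical use: for out in sentiment_pipeline(wrapped): ...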
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __a : _a : Any = XGLMConfig _a : Optional[int] = {} _a : List[str] = 'gelu' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.02 , ) -> Dict: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = d_model _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = ffn_dim _UpperCAmelCase = activation_function _UpperCAmelCase = activation_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = initializer_range _UpperCAmelCase = None _UpperCAmelCase = 0 _UpperCAmelCase = 2 _UpperCAmelCase = 1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = self.get_config() _UpperCAmelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_SCREAMING_SNAKE_CASE , ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _a : int = (TFXGLMForCausalLM,) if 
is_tf_available() else () _a : int = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) _a : Dict = False _a : int = False _a : Union[str, Any] = False def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = TFXGLMModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = TFXGLMModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" super().test_resize_token_embeddings() @require_tf class __a ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" _UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) _UpperCAmelCase = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _UpperCAmelCase = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: on _UpperCAmelCase = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , _SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) _UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) _UpperCAmelCase = tokenizer('Today is a nice day and' , return_tensors='tf' ) _UpperCAmelCase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): _UpperCAmelCase = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , seed=[7, 0] ) _UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) _UpperCAmelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) _UpperCAmelCase = 'left' # use different length sentences to test batching _UpperCAmelCase = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. 
When', 'Hello, my dog is a little', ] _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='tf' , padding=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inputs['input_ids'] _UpperCAmelCase = model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) _UpperCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _UpperCAmelCase = model.generate(input_ids=_SCREAMING_SNAKE_CASE , max_new_tokens=12 ) _UpperCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _UpperCAmelCase = model.generate(input_ids=_SCREAMING_SNAKE_CASE , max_new_tokens=12 ) _UpperCAmelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
329
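The batching test above hinges on left padding; a trimmed sketch of the same generation call outside the test harness (downloads facebook/xglm-564M, so expect a sizeable fetch):

from transformers import TFXGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"  # decoder-only models must be padded on the left for batching
model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

inputs = tokenizer(["Hello, my dog is a little", "Today is"], return_tensors="tf", padding=True)
output_ids = model.generate(
    input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=12
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))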
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ :int = logging.get_logger(__name__) lowerCAmelCase__ :Optional[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class __a ( UpperCAmelCase ): _a : str = 'data2vec-text' def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class __a ( UpperCAmelCase ): @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
329
1
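In the public library the two classes above are exposed as Data2VecTextConfig and Data2VecTextOnnxConfig; a sketch of default instantiation:

from transformers import Data2VecTextConfig

config = Data2VecTextConfig()  # defaults mirror the signature above
print(config.model_type, config.hidden_size, config.num_attention_heads)  # data2vec-text 768 12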
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging lowerCAmelCase__ :List[Any] = logging.get_logger(__name__) lowerCAmelCase__ :Union[str, Any] = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __a ( UpperCAmelCase ): _a : List[Any] = 'perceiver' def __init__( self , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=26 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="kv" , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=262 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=56 , _SCREAMING_SNAKE_CASE=[368, 496] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=1920 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 16, 224, 224] , **_SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = num_latents _UpperCAmelCase = d_latents _UpperCAmelCase = d_model _UpperCAmelCase = num_blocks _UpperCAmelCase = num_self_attends_per_block _UpperCAmelCase = num_self_attention_heads _UpperCAmelCase = num_cross_attention_heads _UpperCAmelCase = qk_channels _UpperCAmelCase = v_channels _UpperCAmelCase = cross_attention_shape_for_attention _UpperCAmelCase = self_attention_widening_factor _UpperCAmelCase = cross_attention_widening_factor _UpperCAmelCase = hidden_act _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_query_residual # masked language modeling attributes _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings # image classification attributes _UpperCAmelCase = image_size # flow attributes _UpperCAmelCase = train_size # multimodal autoencoding attributes _UpperCAmelCase = num_frames _UpperCAmelCase = audio_samples_per_frame _UpperCAmelCase = samples_per_patch _UpperCAmelCase = output_shape class __a ( UpperCAmelCase ): @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('inputs', dynamic_axis), ('attention_mask', dynamic_axis), ] ) @property def UpperCAmelCase__ ( self ) -> float: """simple docstring""" return 1e-4 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 40 , _SCREAMING_SNAKE_CASE = 40 , ) -> Mapping[str, Any]: """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid 
optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = preprocessor.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [' '.join(['a'] ) * seq_length] * batch_size _UpperCAmelCase = dict(preprocessor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = inputs.pop('input_ids' ) return inputs elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension(_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch ) _UpperCAmelCase = self._generate_dummy_images(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = dict(preprocessor(images=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = inputs.pop('pixel_values' ) return inputs else: raise ValueError( 'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
329
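generate_dummy_inputs above takes either a tokenizer (text path, renaming input_ids to inputs) or an image processor (vision path, renaming pixel_values to inputs). A sketch of the text path, assuming the ONNX config class is exposed as PerceiverOnnxConfig:

from transformers import PerceiverConfig, PerceiverTokenizer
from transformers.models.perceiver.configuration_perceiver import PerceiverOnnxConfig
from transformers.utils import TensorType

onnx_config = PerceiverOnnxConfig(PerceiverConfig())
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(sorted(dummy))  # ['attention_mask', 'inputs']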
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
329
1
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __a ( UpperCAmelCase , unittest.TestCase ): _a : Any = RobertaTokenizer _a : Dict = RobertaTokenizerFast _a : List[str] = True _a : Dict = {'cls_token': '<s>'} def UpperCAmelCase__ ( self ) -> str: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _UpperCAmelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) ) _UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _UpperCAmelCase = {'unk_token': '<unk>'} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , **_SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = 'lower newer' _UpperCAmelCase = 'lower newer' return input_text, output_text def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] _UpperCAmelCase = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) # , add_prefix_space=True) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_SCREAMING_SNAKE_CASE ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_SCREAMING_SNAKE_CASE ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.tokenizer_class.from_pretrained('roberta-base' ) _UpperCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode( 'sequence builders' , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = 'Encode this sequence.' _UpperCAmelCase = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Testing spaces after special tokens _UpperCAmelCase = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )} ) # mask token has a left space _UpperCAmelCase = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 'Encode <mask> sequence' _UpperCAmelCase = 'Encode <mask>sequence' _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoded.index(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoded.index(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE 
) _UpperCAmelCase = 'A, <mask> AllenNLP sentence.' _UpperCAmelCase = tokenizer_r.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_p.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( _SCREAMING_SNAKE_CASE , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _SCREAMING_SNAKE_CASE , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _SCREAMING_SNAKE_CASE ) self.assertEqual(post_processor_state['add_prefix_space'] , _SCREAMING_SNAKE_CASE ) self.assertEqual(post_processor_state['trim_offsets'] , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCAmelCase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` _UpperCAmelCase = f'''{text_of_1_token} {text_of_1_token}''' _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_r(_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_SCREAMING_SNAKE_CASE ) + 1, len(_SCREAMING_SNAKE_CASE ) + 1 + len(_SCREAMING_SNAKE_CASE )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_r(_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , 
(len(_SCREAMING_SNAKE_CASE ) + 1, len(_SCREAMING_SNAKE_CASE ) + 1 + len(_SCREAMING_SNAKE_CASE )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_r(_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_SCREAMING_SNAKE_CASE ), len(_SCREAMING_SNAKE_CASE ) + 1 + len(_SCREAMING_SNAKE_CASE )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_r(_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_SCREAMING_SNAKE_CASE ), len(_SCREAMING_SNAKE_CASE ) + 1 + len(_SCREAMING_SNAKE_CASE )) , ) _UpperCAmelCase = f''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_r(_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_SCREAMING_SNAKE_CASE ) + 1, 1 + len(_SCREAMING_SNAKE_CASE ) + 1 + len(_SCREAMING_SNAKE_CASE )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_r(_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_SCREAMING_SNAKE_CASE ), 1 + len(_SCREAMING_SNAKE_CASE ) + 1 + len(_SCREAMING_SNAKE_CASE )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer_r(_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_SCREAMING_SNAKE_CASE ), 1 + len(_SCREAMING_SNAKE_CASE ) + 1 + len(_SCREAMING_SNAKE_CASE )) , )
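# The flag combinations exercised above follow one rule for the two-token
# text f"{text_of_1_token} {text_of_1_token}": `trim_offsets` decides whether
# the separating space is excluded from (True) or attributed to (False) the
# second token's offsets. A sketch of that arithmetic, under the assumption
# that `text_of_1_token` really is a single vocabulary token:
def expected_offsets_sketch(token_len, trim_offsets):
    first = (0, token_len)
    second_start = token_len + 1 if trim_offsets else token_len
    return [first, (second_start, token_len + 1 + token_len)]


assert expected_offsets_sketch(len("hello"), trim_offsets=True) == [(0, 5), (6, 11)]
assert expected_offsets_sketch(len("hello"), trim_offsets=False) == [(0, 5), (5, 11)]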
329
from collections.abc import Generator


def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
    '''simple docstring'''
    _UpperCAmelCase , _UpperCAmelCase = 0, 1
    while True:
        _UpperCAmelCase , _UpperCAmelCase = b, a + b
        yield b


def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    _UpperCAmelCase = 1
    _UpperCAmelCase = fibonacci_generator()
    while len(str(next(a__ ) ) ) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
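# Sanity check for the solution above: the first Fibonacci term with three
# digits is F(12) = 144, so solution(3) should return 12. The generator is
# re-implemented locally so the snippet stays runnable on its own.
from itertools import islice


def _fib_sketch():
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


assert list(islice(_fib_sketch(), 5)) == [1, 2, 3, 5, 8]
assert next(i for i, f in enumerate(_fib_sketch(), start=2) if len(str(f)) >= 3) == 12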
329
1
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __a ( UpperCAmelCase ): def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'width_multiplier' ) ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE="swish" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.25 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = make_divisible(512 * width_multiplier , divisor=8 ) _UpperCAmelCase = hidden_act _UpperCAmelCase = conv_kernel_size _UpperCAmelCase = output_stride _UpperCAmelCase = classifier_dropout_prob _UpperCAmelCase = use_labels _UpperCAmelCase = is_training _UpperCAmelCase = num_labels _UpperCAmelCase = initializer_range _UpperCAmelCase = scope _UpperCAmelCase = width_multiplier _UpperCAmelCase = ffn_dropout _UpperCAmelCase = attn_dropout def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = MobileViTVaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( 
result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = MobileViTVaForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = MobileViTVaForSemanticSegmentation(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) _a : str = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) _a : Optional[Any] = False _a : str = False _a : List[str] = False _a : List[str] = False def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = MobileViTVaModelTester(self ) _UpperCAmelCase = MobileViTVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = 5 self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _UpperCAmelCase = 2 for i in range(len(_SCREAMING_SNAKE_CASE ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = MobileViTVaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __a ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( _SCREAMING_SNAKE_CASE ) 
_UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _UpperCAmelCase = model.to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits # verify the logits _UpperCAmelCase = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=_SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _UpperCAmelCase = model.to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.detach().cpu() _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)] ) _UpperCAmelCase = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
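# `make_divisible` (imported at the top of this file) rounds a channel count
# to a multiple of `divisor` without shrinking it by more than ~10%. A sketch
# of the usual MobileNet-style formula, assumed to match the imported helper:
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Never round down by more than 10% of the requested value.
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)


# With the tester's width_multiplier=0.25, the hidden size computed above is
# make_divisible(512 * 0.25, divisor=8) == 128.
assert make_divisible_sketch(512 * 0.25, divisor=8) == 128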
329
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 30} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
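# `crop_pct` above controls the resize-then-center-crop pipeline: the image
# is first over-resized (shorter edge scaled to shortest_edge / crop_pct) and
# then a `crop_size` center crop is taken. A sketch of that arithmetic; the
# exact rounding inside the processor is an assumption here.
import math


def resize_edge_for_crop_sketch(shortest_edge, crop_pct):
    return int(math.floor(shortest_edge / crop_pct))


# With shortest_edge=30 and crop_pct=0.9 as in the tester above, the image is
# resized so its shorter edge is 33 px, then center-cropped back to 30 px.
assert resize_edge_for_crop_sketch(30, 0.9) == 33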
329
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ :Tuple = logging.get_logger(__name__)

lowerCAmelCase__ :Optional[Any] = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class __a ( UpperCAmelCase ):
    _a : Optional[Any] = 'cvt'

    def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[7, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1] , _SCREAMING_SNAKE_CASE=[64, 192, 384] , _SCREAMING_SNAKE_CASE=[1, 3, 6] , _SCREAMING_SNAKE_CASE=[1, 2, 10] , _SCREAMING_SNAKE_CASE=[4.0, 4.0, 4.0] , _SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , _SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , _SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.1] , _SCREAMING_SNAKE_CASE=[True, True, True] , _SCREAMING_SNAKE_CASE=[False, False, True] , _SCREAMING_SNAKE_CASE=["dw_bn", "dw_bn", "dw_bn"] , _SCREAMING_SNAKE_CASE=[3, 3, 3] , _SCREAMING_SNAKE_CASE=[1, 1, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 2] , _SCREAMING_SNAKE_CASE=[1, 1, 1] , _SCREAMING_SNAKE_CASE=[1, 1, 1] , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(**_SCREAMING_SNAKE_CASE )
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = patch_sizes
        _UpperCAmelCase = patch_stride
        _UpperCAmelCase = patch_padding
        _UpperCAmelCase = embed_dim
        _UpperCAmelCase = num_heads
        _UpperCAmelCase = depth
        _UpperCAmelCase = mlp_ratio
        _UpperCAmelCase = attention_drop_rate
        _UpperCAmelCase = drop_rate
        _UpperCAmelCase = drop_path_rate
        _UpperCAmelCase = qkv_bias
        _UpperCAmelCase = cls_token
        _UpperCAmelCase = qkv_projection_method
        _UpperCAmelCase = kernel_qkv
        _UpperCAmelCase = padding_kv
        _UpperCAmelCase = stride_kv
        _UpperCAmelCase = padding_q
        _UpperCAmelCase = stride_q
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = layer_norm_eps
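# Usage sketch for the config above: CvT is a three-stage architecture, so
# most fields are per-stage lists. Instantiating the public config with the
# defaults shown above:
from transformers import CvtConfig

cvt_config = CvtConfig()
assert cvt_config.model_type == "cvt"
assert cvt_config.embed_dim == [64, 192, 384]  # one entry per stage
assert cvt_config.num_heads == [1, 3, 6]
assert len(cvt_config.depth) == len(cvt_config.patch_sizes) == 3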
329
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
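# The size=(42, 84) -> {'height': 84, 'width': 42} assertion earlier in this
# file reflects how legacy (width, height) tuples are normalized into a size
# dict. A sketch of that conversion (`normalize_size_sketch` is a
# hypothetical name, not the processor's actual helper):
def normalize_size_sketch(size):
    if isinstance(size, (tuple, list)):
        width, height = size  # previous configs stored (width, height)
        return {"height": height, "width": width}
    return dict(size)


assert normalize_size_sketch((42, 84)) == {"height": 84, "width": 42}
assert normalize_size_sketch({"height": 18, "width": 20}) == {"height": 18, "width": 20}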
329
1
from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowerCAmelCase__ :str = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: Any , a__: List[str] ) -> List[Any]: '''simple docstring''' try: with open(a__ , 'rb' ) as flax_state_f: _UpperCAmelCase = from_bytes(a__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(a__ ) as f: if f.read().startswith('version' ): raise OSError( 'You seem to have cloned a repository without having git-lfs installed. Please' ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the' ' folder you cloned.' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(a__ , a__ ) def lowerCAmelCase__ ( a__: Union[str, Any] , a__: Any ) -> Any: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( 'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights _UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda a__ : x.dtype == jnp.bfloataa , a__ ) ).values() if any(a__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) _UpperCAmelCase = jax.tree_util.tree_map( lambda a__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a__ ) _UpperCAmelCase = '' _UpperCAmelCase = flatten_dict(a__ , sep='.' ) _UpperCAmelCase = pt_model.state_dict() # keep track of unexpected & missing keys _UpperCAmelCase = [] _UpperCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _UpperCAmelCase = flax_key_tuple.split('.' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] _UpperCAmelCase = jnp.transpose(a__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] _UpperCAmelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(a__ ): _UpperCAmelCase = ( flax_key_tuple_string.replace('_0' , '.0' ) .replace('_1' , '.1' ) .replace('_2' , '.2' ) .replace('_3' , '.3' ) .replace('_4' , '.4' ) .replace('_5' , '.5' ) .replace('_6' , '.6' ) .replace('_7' , '.7' ) .replace('_8' , '.8' ) .replace('_9' , '.9' ) ) _UpperCAmelCase = '.'.join(a__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict _UpperCAmelCase = np.asarray(a__ ) if not isinstance(a__ , np.ndarray ) else flax_tensor _UpperCAmelCase = torch.from_numpy(a__ ) # remove from missing keys missing_keys.remove(a__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(a__ ) pt_model.load_state_dict(a__ ) # re-transform missing_keys to list _UpperCAmelCase = list(a__ ) if len(a__ ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) if len(a__ ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ' use it for predictions and inference.' ) return pt_model
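# The key transformations above reduce to two tensor rules: 4D conv kernels
# move from Flax's (H, W, in, out) layout to PyTorch's (out, in, H, W) via
# the (3, 2, 0, 1) transpose, and 2D dense kernels are simply transposed.
# Shapes below are illustrative.
import numpy as np

conv_kernel = np.zeros((3, 3, 16, 32))       # Flax conv: (H, W, in, out)
pt_conv = np.transpose(conv_kernel, (3, 2, 0, 1))
assert pt_conv.shape == (32, 16, 3, 3)       # PyTorch conv: (out, in, H, W)

dense_kernel = np.zeros((768, 3072))         # Flax dense: (in, out)
assert dense_kernel.T.shape == (3072, 768)   # PyTorch linear: (out, in)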
329
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ :Dict = logging.get_logger(__name__)

lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}


class __a ( UpperCAmelCase ):
    _a : List[str] = 'openai-gpt'
    _a : int = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
        """simple docstring"""
        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = n_positions
        _UpperCAmelCase = n_embd
        _UpperCAmelCase = n_layer
        _UpperCAmelCase = n_head
        _UpperCAmelCase = afn
        _UpperCAmelCase = resid_pdrop
        _UpperCAmelCase = embd_pdrop
        _UpperCAmelCase = attn_pdrop
        _UpperCAmelCase = layer_norm_epsilon
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = summary_type
        _UpperCAmelCase = summary_use_proj
        _UpperCAmelCase = summary_activation
        _UpperCAmelCase = summary_first_dropout
        _UpperCAmelCase = summary_proj_to_labels
        super().__init__(**_SCREAMING_SNAKE_CASE )
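# Usage sketch: the attribute_map above lets the canonical names resolve to
# the GPT-specific fields, so both spellings read the same value.
from transformers import OpenAIGPTConfig

gpt_config = OpenAIGPTConfig()
assert gpt_config.n_embd == 768
assert gpt_config.hidden_size == gpt_config.n_embd          # via attribute_map
assert gpt_config.num_hidden_layers == gpt_config.n_layer   # likewise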
329
1
import tensorflow as tf from ...tf_utils import shape_list class __a ( tf.keras.layers.Layer ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = vocab_size _UpperCAmelCase = d_embed _UpperCAmelCase = d_proj _UpperCAmelCase = cutoffs + [vocab_size] _UpperCAmelCase = [0] + self.cutoffs _UpperCAmelCase = div_val _UpperCAmelCase = self.cutoffs[0] _UpperCAmelCase = len(self.cutoffs ) - 1 _UpperCAmelCase = self.shortlist_size + self.n_clusters _UpperCAmelCase = keep_order _UpperCAmelCase = [] _UpperCAmelCase = [] def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" if self.n_clusters > 0: _UpperCAmelCase = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name='cluster_weight' ) _UpperCAmelCase = self.add_weight( shape=(self.n_clusters,) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name='cluster_bias' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: _UpperCAmelCase = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name=f'''out_projs_._{i}''' , ) self.out_projs.append(_SCREAMING_SNAKE_CASE ) else: self.out_projs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name=f'''out_layers_._{i}_._weight''' , ) _UpperCAmelCase = self.add_weight( shape=(self.vocab_size,) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name=f'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): _UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] _UpperCAmelCase = self.d_embed // (self.div_val**i) _UpperCAmelCase = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name=f'''out_projs_._{i}''' ) self.out_projs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name=f'''out_layers_._{i}_._weight''' , ) _UpperCAmelCase = self.add_weight( shape=(r_idx - l_idx,) , initializer='zeros' , trainable=_SCREAMING_SNAKE_CASE , name=f'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) super().build(_SCREAMING_SNAKE_CASE ) @staticmethod def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" _UpperCAmelCase = x if proj is not None: _UpperCAmelCase = tf.einsum('ibd,ed->ibe' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return tf.einsum('ibd,nd->ibn' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) + b @staticmethod def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = shape_list(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tf.range(lp_size[0] , dtype=target.dtype ) _UpperCAmelCase = tf.stack([r, target] , 1 ) return tf.gather_nd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]: 
"""simple docstring""" _UpperCAmelCase = 0 if self.n_clusters == 0: _UpperCAmelCase = self._logit(_SCREAMING_SNAKE_CASE , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: _UpperCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tf.nn.log_softmax(_SCREAMING_SNAKE_CASE , axis=-1 ) else: _UpperCAmelCase = shape_list(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [] _UpperCAmelCase = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): _UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: _UpperCAmelCase = (target >= l_idx) & (target < r_idx) _UpperCAmelCase = tf.where(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tf.boolean_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - l_idx if self.div_val == 1: _UpperCAmelCase = self.out_layers[0][0][l_idx:r_idx] _UpperCAmelCase = self.out_layers[0][1][l_idx:r_idx] else: _UpperCAmelCase = self.out_layers[i][0] _UpperCAmelCase = self.out_layers[i][1] if i == 0: _UpperCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 ) _UpperCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 ) _UpperCAmelCase = self._logit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.out_projs[0] ) _UpperCAmelCase = tf.nn.log_softmax(_SCREAMING_SNAKE_CASE ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: _UpperCAmelCase = tf.boolean_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._gather_logprob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = self._logit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.out_projs[i] ) _UpperCAmelCase = tf.nn.log_softmax(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster _UpperCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(_SCREAMING_SNAKE_CASE ) if target is not None: _UpperCAmelCase = tf.boolean_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tf.boolean_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._gather_logprob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(_SCREAMING_SNAKE_CASE , -cur_logprob , shape_list(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = tf.concat(_SCREAMING_SNAKE_CASE , axis=-1 ) if target is not None: if return_mean: _UpperCAmelCase = tf.reduce_mean(_SCREAMING_SNAKE_CASE ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(_SCREAMING_SNAKE_CASE ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(_SCREAMING_SNAKE_CASE , name=self.name , aggregation='mean' if return_mean else '' ) return out
329
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: Union[str, Any] ) -> Tuple:
    '''simple docstring'''
    _UpperCAmelCase = hf_hub_url(repo_id=a__ , path=a__ , revision=a__ )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a__ )}'''
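# One concrete parametrization from the test above: blanks in the path are
# percent-encoded and a None revision falls back to "main".
from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
url = f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
assert url == (
    "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/"
    "filename%20with%20blanks.csv"
)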
329
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ :List[str] = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class __a ( UpperCAmelCase , unittest.TestCase ): _a : Dict = DebertaVaTokenizer _a : List[Any] = DebertaVaTokenizerFast _a : int = True _a : List[str] = True def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , unk_token='<unk>' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = 'this is a test' _UpperCAmelCase = 'this is a test' return input_text, output_text def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = '<pad>' _UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '[PAD]' ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 30001 ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = ' \tHeLLo!how \n Are yoU? ' _UpperCAmelCase = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = ' \tHeLLo!how \n Are yoU? ' _UpperCAmelCase = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = 'This is a test' _UpperCAmelCase = [13, 1, 4398, 25, 21, 1289] _UpperCAmelCase = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] _UpperCAmelCase = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # fmt: off _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] _UpperCAmelCase = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] _UpperCAmelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on _UpperCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.encode('sequence builders' ) _UpperCAmelCase = tokenizer.encode('multi-sequence build' ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _SCREAMING_SNAKE_CASE ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _SCREAMING_SNAKE_CASE , ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is a hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
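# Illustrative calls for the version helpers used above; the requirement strings here are
# examples, not the actual pins from setup.py.
require_version("numpy>=1.17")                  # raises if the installed numpy does not satisfy the spec
require_version("tokenizers>=0.11.1,!=0.11.3")  # multiple comma-separated specifiers are allowed
dep_version_check("tqdm")                       # looks the pin up in dependency_versions_table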
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    post_fix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(post_fix))
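# A quick sanity check of the evaluator above, as a standalone snippet:
# "5 6 9 * +" pushes 5, 6 and 9, evaluates 6 * 9 = 54, then 5 + 54 = 59.
assert solve("5 6 9 * +".split(" ")) == 59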
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
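# Minimal usage sketch for the iterative DFS above, using the module-level graph G:
reachable = depth_first_search(G, "A")
assert reachable == {"A", "B", "C", "D", "E", "F", "G"}  # every vertex is reachable from "A"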
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each matched keyword to the list of its start indices.

        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict[str, list[int]] = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
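# Usage sketch for the automaton above: build it once from the keyword list, then find
# every occurrence of every keyword in a single left-to-right scan of the text.
automaton = Automaton(["what", "hat", "ver", "er"])
occurrences = automaton.search_in("whatever, err ... , wherever")
assert occurrences == {"what": [0], "hat": [1], "ver": [5, 25], "er": [6, 10, 22, 26]}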
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key (cyclically) until it is as long as the message."""
    x = len(key)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the generated key; spaces are passed through."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text with the generated key; spaces are passed through."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
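# Round-trip sketch for the cipher above: decrypting with the same generated key
# recovers the plaintext exactly, since (m - k) + k is the identity modulo 26.
key_new = generate_key("THE GERMAN ATTACK", "SECRET")
encrypted = cipher_text("THE GERMAN ATTACK", key_new)
assert original_text(encrypted, key_new) == "THE GERMAN ATTACK"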
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ :int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} if prompt is not None: _UpperCAmelCase = prompt if generate_kwargs is not None: _UpperCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _UpperCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) _UpperCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. ''' 'Note also that one single text can be provided for conditional image to text generation.' 
) _UpperCAmelCase = self.model.config.model_type if model_type == "git": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _UpperCAmelCase = None return model_inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs['input_ids'] ) ): _UpperCAmelCase = None if generate_kwargs is None: _UpperCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _UpperCAmelCase = model_inputs.pop(self.model.main_input_name ) _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs: _UpperCAmelCase = { 'generated_text': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records
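# Usage sketch for the image-to-text pipeline above; the checkpoint and image URL are
# illustrative public examples, not fixed defaults of the class.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
outputs = captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
print(outputs[0]["generated_text"])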
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Shuffle lists in place, grouped by length so parallel lists stay aligned
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
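# Sketch of how gen_kwargs are sharded across jobs: the data-source list is cut into
# contiguous ranges, one per job, while scalar kwargs are copied unchanged.
gen_kwargs = {"files": ["a", "b", "c", "d", "e"], "mode": "train"}
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
assert _split_gen_kwargs(gen_kwargs, max_num_jobs=2) == [
    {"files": ["a", "b", "c"], "mode": "train"},
    {"files": ["d", "e"], "mode": "train"},
]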
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
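# Minimal usage sketch for `deprecate` (the kwarg names are hypothetical): emit a
# FutureWarning for a deprecated keyword argument and recover the value it carried.
kwargs = {"old_arg": 42}
value = deprecate("old_arg", "999.0.0", "Use `new_arg` instead.", take_from=kwargs)
assert value == 42 and "old_arg" not in kwargs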
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase__ :Union[str, Any] = logging.get_logger(__name__) class __a ( enum.Enum ): _a : Union[str, Any] = 0 _a : List[Any] = 1 @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): _a : Any = 'generated' def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = {} if truncation is not None: _UpperCAmelCase = truncation _UpperCAmelCase = generate_kwargs _UpperCAmelCase = {} if return_tensors is not None and return_type is None: _UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: _UpperCAmelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCAmelCase = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' ) _UpperCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else '' if isinstance(args[0] , _SCREAMING_SNAKE_CASE ): if self.tokenizer.pad_token_id is None: raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' ) _UpperCAmelCase = ([prefix + arg for arg in args[0]],) _UpperCAmelCase = True elif isinstance(args[0] , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = (prefix + args[0],) _UpperCAmelCase = False else: raise ValueError( f''' `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`''' ) _UpperCAmelCase = self.tokenizer(*_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if ( isinstance(args[0] , _SCREAMING_SNAKE_CASE ) and all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for el in args[0] ) and all(len(_SCREAMING_SNAKE_CASE ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=TruncationStrategy.DO_NOT_TRUNCATE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self._parse_and_tokenize(_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if self.framework == "pt": _UpperCAmelCase , _UpperCAmelCase = model_inputs['input_ids'].shape elif self.framework == "tf": _UpperCAmelCase , _UpperCAmelCase = tf.shape(model_inputs['input_ids'] ).numpy() _UpperCAmelCase = generate_kwargs.get('min_length' , self.model.config.min_length ) _UpperCAmelCase = generate_kwargs.get('max_length' , self.model.config.max_length ) self.check_inputs(_SCREAMING_SNAKE_CASE , generate_kwargs['min_length'] , generate_kwargs['max_length'] ) _UpperCAmelCase = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = output_ids.shape[0] if self.framework == "pt": _UpperCAmelCase = output_ids.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": _UpperCAmelCase = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.TEXT , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: _UpperCAmelCase = {f'''{self.return_name}_token_ids''': output_ids} elif return_type == ReturnType.TEXT: _UpperCAmelCase = { f'''{self.return_name}_text''': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): _a : int = 'summary' def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" if max_length < min_length: logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' ) if input_length < max_length: logger.warning( f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. 
Since this is ''' 'a summarization task, where outputs shorter than the input are typically wanted, you might ' f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' ) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): _a : Dict = 'translation' def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ''' 'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' ) return True def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=TruncationStrategy.DO_NOT_TRUNCATE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" if getattr(self.tokenizer , '_build_translation_inputs' , _SCREAMING_SNAKE_CASE ): return self.tokenizer._build_translation_inputs( *_SCREAMING_SNAKE_CASE , return_tensors=self.framework , truncation=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE ) else: return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = super()._sanitize_parameters(**_SCREAMING_SNAKE_CASE ) if src_lang is not None: _UpperCAmelCase = src_lang if tgt_lang is not None: _UpperCAmelCase = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. _UpperCAmelCase = kwargs.get('task' , self.task ) _UpperCAmelCase = task.split('_' ) if task and len(_SCREAMING_SNAKE_CASE ) == 4: # translation, XX, to YY _UpperCAmelCase = items[1] _UpperCAmelCase = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
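# Usage sketch for the task-specific pipelines defined above; the checkpoints named here
# are illustrative public models, not requirements of the classes themselves.
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
translator = pipeline("translation_en_to_fr", model="t5-small")
print(summarizer("Long input article ...", max_length=40)[0]["summary_text"])
print(translator("How are you?")[0]["translation_text"])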
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among the picked balls (Project Euler 493)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
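# Sanity bounds for the expectation computed above: by linearity of expectation over the
# 7 colours, E[#colours] = 7 * (1 - C(60, 20) / C(70, 20)), which must lie in [1, 7].
expected = float(solution(20))
assert 1.0 <= expected <= float(NUM_COLOURS)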
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_multiple_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = weight_tying _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = True return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" _UpperCAmelCase = 
GPTNeoXJapaneseModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = True _UpperCAmelCase = GPTNeoXJapaneseModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" _UpperCAmelCase = GPTNeoXJapaneseForCausalLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = True _UpperCAmelCase = GPTNeoXJapaneseForCausalLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() # first forward pass _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = output_from_no_past['hidden_states'][0] _UpperCAmelCase = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )['hidden_states'][0] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () _a : str = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () _a : 
Optional[int] = ( {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) _a : List[str] = False _a : int = False _a : str = False _a : int = False def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = GPTNeoXJapaneseModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() _UpperCAmelCase = None self.model_tester.create_and_check_model_as_decoder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = 'abeja/gpt-neox-japanese-2.7b' _UpperCAmelCase = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、'] _UpperCAmelCase = [ 'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。', '100年後に必要とされる会社は、「人」が中心の会社です。', 'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。', '国境の長いトンネルを抜けると、そこは雪国だった。', '美味しい日本食といえば、やっぱりお寿司ですよね。', ] _UpperCAmelCase = GPTNeoXJapaneseTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [] for prompt in prompts: _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids _UpperCAmelCase = model.generate(_SCREAMING_SNAKE_CASE , max_length=50 ) _UpperCAmelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) predicted_outputs += generated_string self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
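# A minimal, self-contained sketch of the cache-consistency property the tester
# above verifies: logits from one full forward pass should match an incremental
# pass that reuses past_key_values. The tiny config values here are illustrative
# assumptions, not the suite's settings.
import torch

from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseForCausalLM

config = GPTNeoXJapaneseConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_multiple_size=4
)
model = GPTNeoXJapaneseForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))
next_token = torch.randint(0, config.vocab_size, (1, 1))

with torch.no_grad():
    # full pass over the extended sequence
    full_logits = model(torch.cat([input_ids, next_token], dim=-1)).logits[:, -1]
    # incremental pass: cache the prefix, then feed only the new token
    past = model(input_ids, use_cache=True).past_key_values
    step_logits = model(next_token, past_key_values=past).logits[:, -1]

assert torch.allclose(full_logits, step_logits, atol=1e-3)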
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was sucessfully converted!' 
)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    args = parser.parse_args()
    lowerCAmelCase__(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
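# Example invocation of the conversion script above, with placeholder paths.
# The file name is an assumption (save it as convert_t5x_checkpoint_to_flax.py
# or similar); google/long-t5-local-base is one public LongT5 config name:
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/flax_dump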
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=_SCREAMING_SNAKE_CASE ) as mock_head: _UpperCAmelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=_SCREAMING_SNAKE_CASE ) as mock_head: _UpperCAmelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" try: _UpperCAmelCase = tempfile.mktemp() with open(_SCREAMING_SNAKE_CASE , 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AlbertTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) finally: os.remove(_SCREAMING_SNAKE_CASE ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' , 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json' ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class __a ( unittest.TestCase ): _a : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def UpperCAmelCase__ ( cls ) -> str: """simple docstring""" _UpperCAmelCase = TOKEN HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( cls ) -> List[Any]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.txt' ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) _UpperCAmelCase = BertTokenizer(_SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token ) _UpperCAmelCase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='test-tokenizer' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) _UpperCAmelCase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.txt' ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) _UpperCAmelCase = BertTokenizer(_SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token ) _UpperCAmelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _SCREAMING_SNAKE_CASE , repo_id='valid_org/test-tokenizer-org' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) _UpperCAmelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def UpperCAmelCase__ ( self ) -> str: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.txt' ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) _UpperCAmelCase = CustomTokenizer(_SCREAMING_SNAKE_CASE ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) _UpperCAmelCase = 
AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.txt' ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) _UpperCAmelCase = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) _UpperCAmelCase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' ) _UpperCAmelCase = AutoTokenizer.from_pretrained( f'''{USER}/test-dynamic-tokenizer''' , use_fast=_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = Trie() _UpperCAmelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 
3] ) self.assertEqual(_SCREAMING_SNAKE_CASE , ['AB', 'C'] )
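# A minimal, independent sketch of the character-trie behaviour the tests above
# exercise. This is a simplified stand-in, not transformers' internal Trie:
# `trie_add` stores each token as a nested-dict path ending in a "" sentinel,
# and `trie_split` cuts text at the longest stored tokens, scanning left to right.
def trie_add(root: dict, token: str) -> None:
    node = root
    for ch in token:
        node = node.setdefault(ch, {})
    node[""] = 1  # sentinel: a complete token ends here


def trie_split(root: dict, text: str) -> list:
    out, start, i = [], 0, 0
    while i < len(text):
        node, j, end = root, i, None
        while j < len(text) and text[j] in node:
            node = node[text[j]]
            j += 1
            if "" in node:
                end = j  # remember the longest match so far
        if end is None:
            i += 1  # no stored token starts here; keep scanning
        else:
            if i > start:
                out.append(text[start:i])  # flush the unmatched prefix
            out.append(text[i:end])
            start = i = end
    if start < len(text):
        out.append(text[start:])
    return out


root = {}
for token in ("[CLS]", "extra_id_1", "extra_id_100"):
    trie_add(root, token)
print(trie_split(root, "[CLS] This is a extra_id_100"))  # ['[CLS]', ' This is a ', 'extra_id_100']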
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}


class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
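# Quick usage sketch for the config above: attribute_map exposes the
# CTRL-specific field names under the generic PretrainedConfig aliases.
from transformers import CTRLConfig

config = CTRLConfig()
assert config.hidden_size == config.n_embd  # alias resolved via attribute_map
assert config.num_hidden_layers == config.n_layer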
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Check that the partition for `positive_integer` is perfect, i.e. that
    log2(sqrt(4 * n + 1) / 2 + 1 / 2) is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 1_2_3_4_5) -> int:
    """Find the smallest partition candidate for which the proportion of
    perfect partitions drops below `max_proportion`."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
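# Worked example for check_partition_perfect above: for n = 2 the expression
# sqrt(4 * 2 + 1) / 2 + 1 / 2 equals 2, and log2(2) = 1 is an integer, so the
# check passes; for n = 6 it equals 3, and log2(3) is irrational, so it fails.
assert check_partition_perfect(2)
assert not check_partition_perfect(6)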
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
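# A minimal usage sketch of the image processor exercised above. The blank
# image is a stand-in for a real photo; with default settings the shorter edge
# is resized to 800 (capped at 1333 on the longer edge), then normalized.
from PIL import Image

from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor()
image = Image.new('RGB', (640, 480))  # placeholder input
inputs = processor(images=image, return_tensors='pt')
print(inputs['pixel_values'].shape)  # (1, 3, resized_height, resized_width)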
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class
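# Usage sketch for the processor above. OFA-Sys/chinese-clip-vit-base-patch16
# is a public ChineseCLIP checkpoint; the blank image and the caption
# ("a photo of a cat") are illustrative stand-ins.
from PIL import Image

from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')
inputs = processor(text=['一张猫的照片'], images=Image.new('RGB', (224, 224)), return_tensors='pt')
print(sorted(inputs.keys()))  # text fields plus pixel_values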
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
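# A short usage sketch of the tokenizer exercised above: Jukebox conditions on
# (artist, genres, lyrics) metadata and returns one input_ids entry per prior.
# Loading the checkpoint requires network access; the metadata values are
# illustrative.
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
tokens = tokenizer(artist='Zac Brown Band', genres='Country', lyrics='I met a traveller')['input_ids']
print(len(tokens))  # 3, one per prior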
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCAmelCase__ ( a__: str , a__: str , a__: str ) -> Dict: '''simple docstring''' def get_masked_lm_array(a__: str ): _UpperCAmelCase = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _UpperCAmelCase = tf.train.load_variable(a__ , a__ ) if "kernel" in name: _UpperCAmelCase = array.transpose() return torch.from_numpy(a__ ) def get_encoder_array(a__: str ): _UpperCAmelCase = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _UpperCAmelCase = tf.train.load_variable(a__ , a__ ) if "kernel" in name: _UpperCAmelCase = array.transpose() return torch.from_numpy(a__ ) def get_encoder_layer_array(a__: int , a__: str ): _UpperCAmelCase = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _UpperCAmelCase = tf.train.load_variable(a__ , a__ ) if "kernel" in name: _UpperCAmelCase = array.transpose() return torch.from_numpy(a__ ) def get_encoder_attention_layer_array(a__: int , a__: str , a__: str ): _UpperCAmelCase = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _UpperCAmelCase = tf.train.load_variable(a__ , a__ ) _UpperCAmelCase = array.reshape(a__ ) if "kernel" in name: _UpperCAmelCase = array.transpose() return torch.from_numpy(a__ ) print(F'''Loading model based on config from {config_path}...''' ) _UpperCAmelCase = BertConfig.from_json_file(a__ ) _UpperCAmelCase = BertForMaskedLM(a__ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): _UpperCAmelCase = model.bert.encoder.layer[layer_index] # Self-attention _UpperCAmelCase = layer.attention.self _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_query_dense/kernel' , self_attn.query.weight.data.shape ) _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_query_dense/bias' , self_attn.query.bias.data.shape ) _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_key_dense/kernel' , self_attn.key.weight.data.shape ) _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_key_dense/bias' , self_attn.key.bias.data.shape ) _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_value_dense/kernel' , self_attn.value.weight.data.shape ) _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output _UpperCAmelCase = layer.attention.output _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_output_dense/kernel' , self_output.dense.weight.data.shape ) _UpperCAmelCase = get_encoder_attention_layer_array( a__ , '_output_dense/bias' , self_output.dense.bias.data.shape ) _UpperCAmelCase = get_encoder_layer_array(a__ , '_attention_layer_norm/gamma' ) _UpperCAmelCase = get_encoder_layer_array(a__ , '_attention_layer_norm/beta' ) # Intermediate _UpperCAmelCase = layer.intermediate _UpperCAmelCase = get_encoder_layer_array(a__ , '_intermediate_dense/kernel' ) _UpperCAmelCase = get_encoder_layer_array(a__ , '_intermediate_dense/bias' ) # Output _UpperCAmelCase = layer.output _UpperCAmelCase = get_encoder_layer_array(a__ , '_output_dense/kernel' ) _UpperCAmelCase = get_encoder_layer_array(a__ , '_output_dense/bias' ) _UpperCAmelCase = get_encoder_layer_array(a__ , '_output_layer_norm/gamma' ) _UpperCAmelCase = 
get_encoder_layer_array(a__ , '_output_layer_norm/beta' )

    # Embeddings
    _UpperCAmelCase = get_encoder_array('_position_embedding_layer/embeddings' )
    _UpperCAmelCase = get_encoder_array('_type_embedding_layer/embeddings' )
    _UpperCAmelCase = get_encoder_array('_embedding_norm_layer/gamma' )
    _UpperCAmelCase = get_encoder_array('_embedding_norm_layer/beta' )

    # LM Head
    _UpperCAmelCase = model.cls.predictions.transform
    _UpperCAmelCase = get_masked_lm_array('dense/kernel' )
    _UpperCAmelCase = get_masked_lm_array('dense/bias' )
    _UpperCAmelCase = get_masked_lm_array('layer_norm/gamma' )
    _UpperCAmelCase = get_masked_lm_array('layer_norm/beta' )
    _UpperCAmelCase = get_masked_lm_array('embedding_table' )

    # Pooling
    _UpperCAmelCase = BertPooler(config=a__ )
    _UpperCAmelCase = get_encoder_array('_pooler_layer/kernel' )
    _UpperCAmelCase = get_encoder_array('_pooler_layer/bias' )

    # Export final model
    model.save_pretrained(a__ )

    # Integration test - should load without any errors ;)
    _UpperCAmelCase = BertForMaskedLM.from_pretrained(a__ )
    print(new_model.eval() )

    print('Model conversion was done successfully!' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    lowerCAmelCase__(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
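# Example invocation of the conversion script above, with placeholder paths.
# The file name is an assumption; save the script under any name you like:
#
#   python convert_token_dropping_bert_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model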
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__) def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' , ) _UpperCAmelCase = parser.parse_args() return args def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' def fn(a__: str ): return tokenizer(examples['text'] ) return fn def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for i in range(len(tokenized_data['input_ids'] ) ): _UpperCAmelCase = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _UpperCAmelCase = tf.train.Features(feature=a__ ) _UpperCAmelCase = tf.train.Example(features=a__ ) _UpperCAmelCase = example.SerializeToString() records.append(a__ ) return records def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int: '''simple docstring''' _UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _UpperCAmelCase = min(len(a__ ) , args.limit ) _UpperCAmelCase = dataset.select(range(a__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(a__ ): os.makedirs(a__ ) else: _UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_UpperCAmelCase = tokenize_function(a__ ) _UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(a__: Optional[int] ): # Concatenate all texts. _UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )] for k, t in concatenated_examples.items() } return result _UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 for shard in range(0 , len(a__ ) , args.shard_size ): _UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] _UpperCAmelCase = len(dataset_snapshot['input_ids'] ) _UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _UpperCAmelCase = get_serialized_examples(a__ ) with tf.io.TFRecordWriter(a__ ) as out_file: for i in range(len(a__ ) ): _UpperCAmelCase = serialized_examples[i] out_file.write(a__ ) print('Wrote file {} containing {} records'.format(a__ , a__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f: print(F'''Total {args.split} records: {total_records}''' , file=a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = parse_args() main(args)
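# A toy, self-contained illustration of the group_texts chunking logic above:
# concatenate the tokenized examples, drop the remainder, and slice fixed-size
# blocks. max_length=4 here is only for demonstration.
def group_texts_demo(examples, max_length=4):
    concatenated = {k: sum(examples[k], []) for k in examples}
    total_length = (len(concatenated['input_ids']) // max_length) * max_length
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }

print(group_texts_demo({'input_ids': [[1, 2, 3], [4, 5], [6, 7, 8, 9]]}))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]} -- the trailing 9 is dropped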
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
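# --- Usage sketch (added): the configuration above is exposed publicly as `DeiTConfig`. ---
from transformers import DeiTConfig

deit_config = DeiTConfig(image_size=384)  # override any default, e.g. for higher-resolution fine-tuning
print(deit_config.hidden_size, deit_config.num_hidden_layers)  # 768 12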
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
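# --- Illustrative sketch (added): the schedule factories exercised by the tests above,
# --- used standalone. The tiny model and step counts are arbitrary.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

tiny_model = torch.nn.Linear(4, 2)
opt = AdamW(tiny_model.parameters(), lr=1e-3)
sched = get_linear_schedule_with_warmup(opt, num_warmup_steps=10, num_training_steps=100)

for _ in range(100):
    opt.step()    # update parameters first...
    sched.step()  # ...then advance the learning-rate schedule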
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCAmelCase__ ( a__: Features ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(a__: FeatureType ) -> None: nonlocal batch_size if isinstance(a__ , a__ ): _UpperCAmelCase = min(a__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(a__ , a__ ): _UpperCAmelCase = min(a__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(a__ , a__ ) and feature.dtype == "binary": _UpperCAmelCase = min(a__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(a__ , a__ ) return None if batch_size is np.inf else batch_size class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__( _SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] _UpperCAmelCase = Parquet( cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory ) return dataset class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: _UpperCAmelCase = self._write(file_obj=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=_SCREAMING_SNAKE_CASE , 
**self.parquet_writer_kwargs ) return written def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('path_or_buf' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) for offset in logging.tqdm( range(0 , len(self.dataset ) , _SCREAMING_SNAKE_CASE ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(_SCREAMING_SNAKE_CASE , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(_SCREAMING_SNAKE_CASE ) written += batch.nbytes writer.close() return written
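# --- Usage sketch (added): the reader/writer above back the public `datasets` Parquet API.
# --- File paths are placeholders.
from datasets import Dataset, load_dataset

tmp = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
tmp.to_parquet("example.parquet")  # routes through the Parquet writer defined above
reloaded = load_dataset("parquet", data_files="example.parquet", split="train")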
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
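# --- Illustrative call (added): the `__main__` block above invokes `convert_poolformer_checkpoint`,
# --- so that is assumed to be the converter's real name; both paths are placeholders.
# convert_poolformer_checkpoint(
#     "poolformer_s12",                  # model_name
#     "checkpoints/poolformer_s12.pth",  # checkpoint_path (original PoolFormer weights)
#     "poolformer_s12_hf",               # pytorch_dump_folder_path
# )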
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
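# --- Usage sketch (added): in the public API these helpers surface as
# --- `transformers.pipelines.pt_utils.KeyDataset` and friends; the documented pattern is to
# --- stream a `datasets` column through a pipeline. Model and dataset ids are illustrative.
from datasets import load_dataset
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
imdb = load_dataset("imdb", split="test")
for prediction in clf(KeyDataset(imdb, "text"), batch_size=8):
    print(prediction)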
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> List[str]: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DetaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = DetaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DetaImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DetaImageProcessor(format='coco_panoptic' ) _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 
0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
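# --- Usage sketch (added): exercising the processor under test outside the test harness.
# --- The fixture path is the one referenced above and only exists inside a repository checkout.
from PIL import Image
from transformers import DetaImageProcessor

deta_processor = DetaImageProcessor()  # default resize/rescale/normalize/pad settings
img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoded = deta_processor(images=img, return_tensors="pt")
print(encoded["pixel_values"].shape)  # (1, 3, H, W) after resizing and padding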
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
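# --- Usage sketch (added): the configuration above is exposed publicly as `Data2VecTextConfig`.
from transformers import Data2VecTextConfig, Data2VecTextModel

d2v_config = Data2VecTextConfig(num_hidden_layers=6)  # a smaller variant for illustration
d2v_model = Data2VecTextModel(d2v_config)             # randomly initialized from the config
print(d2v_model.config.hidden_size)                   # 768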
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
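# --- Illustration (added): thanks to the `_LazyModule` indirection above, `import transformers`
# --- stays cheap; each submodule is imported only on first attribute access.
import transformers

lf_config = transformers.LongformerConfig(attention_window=256)  # loads configuration_longformer lazily
lf_model = transformers.LongformerModel(lf_config)               # loads modeling_longformer lazily (needs torch)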
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--",
    " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a plain-text message into space-separated Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate space-separated Morse code back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
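# Example (an illustrative sketch, not part of the original file): the two
# helpers above invert each other for any message whose characters appear in
# MORSE_CODE_DICT.
#
#   >>> encrypt("SOS")
#   '... --- ...'
#   >>> decrypt("... --- ...")
#   'SOS'
#
# Note that decrypt(encrypt(m)) == m.upper(), since encrypt() upper-cases its input.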
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers starting from the second term: 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
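# Quick sanity check (a sketch; the values follow from the generator above,
# which yields 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...):
#
#   >>> solution(3)
#   12
#
# 144 is the twelfth Fibonacci term (counting F(1) = F(2) = 1) and the first
# one with three digits.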
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer with NMT/NFKC normalization, whitespace collapsing and lower-casing."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
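# Usage sketch for the class above (hedged: `corpus.txt` and the vocab size are
# hypothetical; `save`/`encode` come from the `BaseTokenizer` wrapper):
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train("corpus.txt", vocab_size=8000)
#   tokenizer.save("unigram.json")
#   print(tokenizer.encode("Hello world").tokens)
#
# Training ends with add_unk_id() so the serialized Unigram model records which
# vocabulary slot is the <unk> token.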
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 30} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
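# A back-of-the-envelope sketch of the resize math behind crop_pct in the tests
# above (hedged: this mirrors our reading of PoolFormerImageProcessor, which
# appears to follow timm's evaluation transform):
shortest_edge, crop_pct = 30, 0.9
scale_size = int(shortest_edge / crop_pct)  # 33: the shortest edge after the resize step
# the result is then center-cropped to crop_size (30x30); crop_pct < 1 leaves a
# small margin around the crop so the borders are not stretched by interpolation.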
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
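# _LazyModule (used above) defers the heavy framework imports until a name is
# actually accessed. A minimal standalone sketch of the idea -- a toy, not
# transformers' real implementation:
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # import the owning submodule only on first attribute access
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        return getattr(module, attr)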
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
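# Concrete expansion of one parametrization (a sketch): for
# repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None,
# the test expects
#   https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# since quote() percent-encodes the blanks in the filename.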
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder; each input may be 0, 1, or 2 (2 = superposition via Hadamard)."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
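# Classical cross-check (a sketch; this helper is hypothetical and not part of
# the original file): for definite inputs (0 or 1), the measured bitstring
# should concentrate on the string carry_out + sum_bit.
def classical_full_adder(input_1: int, input_2: int, carry_in: int) -> tuple[int, int]:
    total = input_1 + input_2 + carry_in
    return total // 2, total % 2  # (carry_out, sum_bit)

# e.g. classical_full_adder(1, 1, 1) == (1, 1), matching counts dominated by "11".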
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core

# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
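# Usage sketch for dep_version_check (hedged: the version spec shown is
# illustrative; the real pin lives in dependency_versions_table.py):
#
#   dep_version_check("tqdm")  # verifies the installed tqdm satisfies e.g. "tqdm>=4.27"
#   dep_version_check("numpy", "try: pip install numpy")  # custom hint shown on failure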
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :List[str] = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: str , a__: Tuple=False , a__: Tuple=False , a__: List[Any]=False ) -> str: '''simple docstring''' _UpperCAmelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), 
('vqa_classifier.1.bias', 'classifier.1.bias'), ('vqa_classifier.3.weight', 'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), ('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def lowerCAmelCase__ ( a__: List[Any] , a__: Optional[int] ) -> List[Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): _UpperCAmelCase = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def lowerCAmelCase__ ( a__: Any ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(a__ , a__ ) def lowerCAmelCase__ ( a__: int , a__: Optional[Any] , a__: Dict ) -> Dict: '''simple docstring''' _UpperCAmelCase = dct.pop(a__ ) _UpperCAmelCase = val @torch.no_grad() def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a__ ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False if "vqa" in checkpoint_url: _UpperCAmelCase = True _UpperCAmelCase = 3_1_2_9 _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = 'vqa2-id2label.json' _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} _UpperCAmelCase = ViltForQuestionAnswering(a__ ) elif "nlvr" in checkpoint_url: _UpperCAmelCase = True _UpperCAmelCase = 2 _UpperCAmelCase = {0: 'False', 1: 'True'} _UpperCAmelCase = {v: k for k, v in config.idalabel.items()} _UpperCAmelCase = 3 _UpperCAmelCase = ViltForImagesAndTextClassification(a__ ) elif "irtr" in checkpoint_url: _UpperCAmelCase = True _UpperCAmelCase = ViltForImageAndTextRetrieval(a__ ) elif "mlm_itm" in checkpoint_url: _UpperCAmelCase = True _UpperCAmelCase = ViltForMaskedLM(a__ ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys _UpperCAmelCase = torch.hub.load_state_dict_from_url(a__ , map_location='cpu' )['state_dict'] _UpperCAmelCase = create_rename_keys(a__ , a__ , a__ , a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) read_in_q_k_v(a__ , a__ ) if mlm_model or irtr_model: _UpperCAmelCase = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(a__ , a__ ) # load state dict into HuggingFace model model.eval() if mlm_model: _UpperCAmelCase , 
_UpperCAmelCase = model.load_state_dict(a__ , strict=a__ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(a__ ) # Define processor _UpperCAmelCase = ViltImageProcessor(size=3_8_4 ) _UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' ) _UpperCAmelCase = ViltProcessor(a__ , a__ ) # Forward pass on example inputs (image + text) if nlvr_model: _UpperCAmelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a__ ).raw ) _UpperCAmelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a__ ).raw ) _UpperCAmelCase = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) _UpperCAmelCase = processor(a__ , a__ , return_tensors='pt' ) _UpperCAmelCase = processor(a__ , a__ , return_tensors='pt' ) _UpperCAmelCase = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: _UpperCAmelCase = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a__ ).raw ) if mlm_model: _UpperCAmelCase = 'a bunch of [MASK] laying on a [MASK].' else: _UpperCAmelCase = 'How many cats are there?' _UpperCAmelCase = processor(a__ , a__ , return_tensors='pt' ) _UpperCAmelCase = model(**a__ ) # Verify outputs if mlm_model: _UpperCAmelCase = torch.Size([1, 1_1, 3_0_5_2_2] ) _UpperCAmelCase = torch.tensor([-12.5_061, -12.5_123, -12.5_174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a__ , atol=1e-4 ) # verify masked token prediction equals "cats" _UpperCAmelCase = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: _UpperCAmelCase = torch.Size([1, 3_1_2_9] ) _UpperCAmelCase = torch.tensor([-15.9_495, -18.1_472, -10.3_041] ) assert torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a__ , atol=1e-4 ) # verify vqa prediction equals "2" _UpperCAmelCase = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: _UpperCAmelCase = torch.Size([1, 2] ) _UpperCAmelCase = torch.tensor([-2.8_721, 2.1_291] ) assert torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) assert outputs.logits.shape == expected_shape Path(a__ ).mkdir(exist_ok=a__ ) print(F'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(a__ ) processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCAmelCase__ :Union[str, Any] = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
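# The conversion above hinges on a pop-and-reinsert pattern for state dict keys;
# a minimal generic sketch of that pattern (toy tensor, real ViLT key names):
import torch


def rename_state_dict_key(state_dict: dict, old: str, new: str) -> None:
    # remove the old entry and re-add its tensor under the new name
    state_dict[new] = state_dict.pop(old)


sd = {"transformer.norm.weight": torch.ones(3)}
rename_state_dict_key(sd, "transformer.norm.weight", "vilt.layernorm.weight")
assert "vilt.layernorm.weight" in sd and "transformer.norm.weight" not in sd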
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of vertices explored from ``start``."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
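# Worked example (the values follow from the graph above): starting at "A", the
# stack-based DFS reaches every vertex, so
#
#   >>> depth_first_search(G, "A") == set("ABCDEFG")
#   True
#
# `reversed(graph[v])` makes the visit order match a recursive left-to-right
# DFS, though the returned set itself is order-independent.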
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ) -> List[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = 100 _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = scope _UpperCAmelCase = out_indices _UpperCAmelCase = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCAmelCase = (image_size // patch_size) ** 2 _UpperCAmelCase = num_patches + 1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , 
) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" _UpperCAmelCase = BeitModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = BeitForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = self.type_sequence_label_size _UpperCAmelCase = BeitForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = BeitForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = BeitForSemanticSegmentation(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : Dict = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _a : Dict = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _a : str = False _a : Tuple = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = BeitModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" 
self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" if not self.model_tester.is_training: return _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(_SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]: continue _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.train() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ).loss loss.backward() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _UpperCAmelCase = False _UpperCAmelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(_SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.to(_SCREAMING_SNAKE_CASE ) model.train() _UpperCAmelCase = 
self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ).loss loss.backward() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = _config_zero_init(_SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(config=_SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = BeitModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __a ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values.to(_SCREAMING_SNAKE_CASE ) # prepare bool_masked_pos _UpperCAmelCase = torch.ones((1, 196) , dtype=torch.bool ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , bool_masked_pos=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits # verify the logits _UpperCAmelCase = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-2 ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) _UpperCAmelCase = 281 self.assertEqual(logits.argmax(-1 ).item() , _SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self ) -> Tuple: """simple 
docstring""" _UpperCAmelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits # verify the logits _UpperCAmelCase = torch.Size((1, 21841) ) self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) _UpperCAmelCase = 2396 self.assertEqual(logits.argmax(-1 ).item() , _SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) _UpperCAmelCase = model.to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BeitImageProcessor(do_resize=_SCREAMING_SNAKE_CASE , size=640 , do_center_crop=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) _UpperCAmelCase = Image.open(ds[0]['file'] ) _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits # verify the logits _UpperCAmelCase = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if is_pillow_less_than_a: _UpperCAmelCase = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=_SCREAMING_SNAKE_CASE , ) else: _UpperCAmelCase = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=_SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) _UpperCAmelCase = model.to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BeitImageProcessor(do_resize=_SCREAMING_SNAKE_CASE , size=640 , do_center_crop=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) _UpperCAmelCase = Image.open(ds[0]['file'] ) _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.detach().cpu() _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)] ) _UpperCAmelCase = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
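# --- Illustrative sketch (not part of the test suite above): minimal BEiT
# image-classification inference mirroring the slow integration test. The
# checkpoint id comes from the tests themselves; the helper name and image
# path are placeholders.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

def classify_image(image_path: str) -> str:
    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    model.eval()
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # id2label maps the argmax over the 1000 ImageNet classes to a readable label
    return model.config.id2label[logits.argmax(-1).item()]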
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
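# --- Illustrative sketch: the replicate/shard data-parallel pattern the tests
# above repeat, condensed into one helper. The model id and the positional
# __call__ signature are taken directly from the tests; the helper name is a
# placeholder.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

def generate(prompt: str, num_inference_steps: int = 50):
    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
    )
    num_devices = jax.device_count()
    prompt_ids = pipeline.prepare_inputs([prompt] * num_devices)
    rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one key per device
    params = replicate(params)          # copy the weights onto every device
    prompt_ids = shard(prompt_ids)      # split the batch across devices
    return pipeline(prompt_ids, params, rngs, num_inference_steps, jit=True).images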
from collections.abc import Generator def lowerCAmelCase__ ( ) -> Generator[int, None, None]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = 0, 1 while True: _UpperCAmelCase , _UpperCAmelCase = b, a + b yield b def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int: '''simple docstring''' _UpperCAmelCase = 1 _UpperCAmelCase = fibonacci_generator() while len(str(next(a__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
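# --- Optional cross-check (assumption: the standard Binet-formula bound, not
# part of the original solution): the index of the first Fibonacci term with
# n digits has a closed form, handy for validating the generator above.
import math

def solution_closed_form(n: int = 1_000) -> int:
    phi = (1 + math.sqrt(5)) / 2
    # smallest k with log10(F_k) >= n - 1, using F_k ~ phi**k / sqrt(5)
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))

# solution_closed_form(1_000) == 4782, matching the generator-based answer.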
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ :int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} if prompt is not None: _UpperCAmelCase = prompt if generate_kwargs is not None: _UpperCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _UpperCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) _UpperCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. ''' 'Note also that one single text can be provided for conditional image to text generation.' 
) _UpperCAmelCase = self.model.config.model_type if model_type == "git": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _UpperCAmelCase = None return model_inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs['input_ids'] ) ): _UpperCAmelCase = None if generate_kwargs is None: _UpperCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _UpperCAmelCase = model_inputs.pop(self.model.main_input_name ) _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs: _UpperCAmelCase = { 'generated_text': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records
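# --- Illustrative sketch: driving the class above through the high-level
# `pipeline()` factory. The checkpoint id is an assumption picked for
# illustration; any image-to-text model follows the same call shape.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# -> [{'generated_text': '...'}]; pass `prompt=` for conditional generation.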
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType lowerCAmelCase__ :Optional[Any] = logging.get_logger(__name__) lowerCAmelCase__ :Dict = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class __a ( UpperCAmelCase ): _a : Optional[Any] = 'imagegpt' _a : Tuple = ['past_key_values'] _a : Optional[Any] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _SCREAMING_SNAKE_CASE=512 + 1 , _SCREAMING_SNAKE_CASE=32 * 32 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="quick_gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = n_positions _UpperCAmelCase = n_embd _UpperCAmelCase = n_layer _UpperCAmelCase = n_head _UpperCAmelCase = n_inner _UpperCAmelCase = activation_function _UpperCAmelCase = resid_pdrop _UpperCAmelCase = embd_pdrop _UpperCAmelCase = attn_pdrop _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = scale_attn_weights _UpperCAmelCase = use_cache _UpperCAmelCase = scale_attn_by_inverse_layer_idx _UpperCAmelCase = reorder_and_upcast_attn _UpperCAmelCase = tie_word_embeddings super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) class __a ( UpperCAmelCase ): @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ] ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 32 , ) -> Mapping[str, Any]: """simple docstring""" _UpperCAmelCase = self._generate_dummy_images(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = dict(preprocessor(images=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) ) return inputs
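# --- Illustrative sketch: constructing the config above with its defaults and
# reading fields through the attribute_map aliases it declares.
from transformers import ImageGPTConfig

cfg = ImageGPTConfig()                       # 512 + 1 vocab, 24 layers, n_embd=512
assert cfg.hidden_size == cfg.n_embd == 512  # alias resolved via attribute_map
small = ImageGPTConfig(n_layer=6, n_head=4)
assert small.num_hidden_layers == 6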
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowerCAmelCase__ ( *a__: str , a__: Optional[Union[Dict, Any]] = None , a__: Dict=True , a__: Any=2 ) -> Union[str, Any]: '''simple docstring''' from .. import __version__ _UpperCAmelCase = take_from _UpperCAmelCase = () if not isinstance(args[0] , a__ ): _UpperCAmelCase = (args,) for attribute, version_name, message in args: if version.parse(version.parse(a__ ).base_version ) >= version.parse(a__ ): raise ValueError( F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'''' F''' version {__version__} is >= {version_name}''' ) _UpperCAmelCase = None if isinstance(a__ , a__ ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(a__ ),) _UpperCAmelCase = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.''' elif hasattr(a__ , a__ ): values += (getattr(a__ , a__ ),) _UpperCAmelCase = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.''' elif deprecated_kwargs is None: _UpperCAmelCase = F'''`{attribute}` is deprecated and will be removed in version {version_name}.''' if warning is not None: _UpperCAmelCase = warning + ' ' if standard_warn else '' warnings.warn(warning + message , a__ , stacklevel=a__ ) if isinstance(a__ , a__ ) and len(a__ ) > 0: _UpperCAmelCase = inspect.getouterframes(inspect.currentframe() )[1] _UpperCAmelCase = call_frame.filename _UpperCAmelCase = call_frame.lineno _UpperCAmelCase = call_frame.function _UpperCAmelCase , _UpperCAmelCase = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' ) if len(a__ ) == 0: return elif len(a__ ) == 1: return values[0] return values
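# --- Illustrative sketch (assumption: the helper above is diffusers'
# `deprecate`, whose parameter names were collapsed by the corpus mangling;
# the sketch calls the upstream import instead). A deprecated kwarg is popped
# from the caller's kwargs, a FutureWarning is emitted, and the old value is
# returned so the caller can fall back to it.
from diffusers.utils import deprecate

def resize(image, interp="bilinear", **kwargs):
    # warns and returns the old value if the caller still passes `interpolation`
    old = deprecate(
        ("interpolation", "9.9.9", "Use `interp` instead."), take_from=kwargs
    )
    if old is not None:
        interp = old
    return image, interp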
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() lowerCAmelCase__ :Tuple = logging.get_logger(__name__) lowerCAmelCase__ :Union[str, Any] = '''Hello world! cécé herlolip''' def lowerCAmelCase__ ( a__: str , a__: str , a__: bool ) -> str: '''simple docstring''' _UpperCAmelCase = FairseqRobertaModel.from_pretrained(a__ ) roberta.eval() # disable dropout _UpperCAmelCase = roberta.model.encoder.sentence_encoder _UpperCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _UpperCAmelCase = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , a__ ) _UpperCAmelCase = XLMRobertaXLForSequenceClassification(a__ ) if classification_head else XLMRobertaXLForMaskedLM(a__ ) model.eval() # Now let's copy all the weights. # Embeddings _UpperCAmelCase = roberta_sent_encoder.embed_tokens.weight _UpperCAmelCase = roberta_sent_encoder.embed_positions.weight _UpperCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_UpperCAmelCase = roberta_sent_encoder.layer_norm.weight _UpperCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _UpperCAmelCase = model.roberta.encoder.layer[i] _UpperCAmelCase = roberta_sent_encoder.layers[i] _UpperCAmelCase = layer.attention _UpperCAmelCase = roberta_layer.self_attn_layer_norm.weight _UpperCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention _UpperCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _UpperCAmelCase = roberta_layer.self_attn.q_proj.weight _UpperCAmelCase = roberta_layer.self_attn.q_proj.bias _UpperCAmelCase = roberta_layer.self_attn.k_proj.weight _UpperCAmelCase = roberta_layer.self_attn.k_proj.bias _UpperCAmelCase = roberta_layer.self_attn.v_proj.weight _UpperCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output _UpperCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _UpperCAmelCase = roberta_layer.self_attn.out_proj.weight _UpperCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _UpperCAmelCase = roberta_layer.final_layer_norm.weight _UpperCAmelCase = roberta_layer.final_layer_norm.bias # intermediate _UpperCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _UpperCAmelCase = roberta_layer.fca.weight _UpperCAmelCase = roberta_layer.fca.bias # output _UpperCAmelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _UpperCAmelCase = roberta_layer.fca.weight _UpperCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: _UpperCAmelCase = roberta.model.classification_heads['mnli'].dense.weight _UpperCAmelCase = roberta.model.classification_heads['mnli'].dense.bias _UpperCAmelCase = roberta.model.classification_heads['mnli'].out_proj.weight _UpperCAmelCase = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head _UpperCAmelCase = roberta.model.encoder.lm_head.dense.weight _UpperCAmelCase = roberta.model.encoder.lm_head.dense.bias _UpperCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight _UpperCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias _UpperCAmelCase = roberta.model.encoder.lm_head.weight _UpperCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. _UpperCAmelCase = roberta.encode(a__ ).unsqueeze(0 ) # batch of size 1 _UpperCAmelCase = model(a__ )[0] if classification_head: _UpperCAmelCase = roberta.model.classification_heads['mnli'](roberta.extract_features(a__ ) ) else: _UpperCAmelCase = roberta.model(a__ )[0] print(our_output.shape , their_output.shape ) _UpperCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _UpperCAmelCase = torch.allclose(a__ , a__ , atol=1e-3 ) print('Do both models output the same tensors?' 
, '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(a__ ).mkdir(parents=a__ , exist_ok=a__ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) lowerCAmelCase__ :Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
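# --- Illustrative shell invocation for the conversion script above (the file
# name is an assumption; the flags are exactly the ones argparse registers):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-hf \
#       --classification_head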
import math lowerCAmelCase__ :Optional[int] = 1_0 lowerCAmelCase__ :Optional[Any] = 7 lowerCAmelCase__ :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS def lowerCAmelCase__ ( a__: int = 2_0 ) -> str: '''simple docstring''' _UpperCAmelCase = math.comb(a__ , a__ ) _UpperCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , a__ ) _UpperCAmelCase = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(2_0))
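# --- Optional cross-check (assumption: plain Monte Carlo, not part of the
# original solution). The corpus mangling collapsed the module constants, so
# they are restated locally: 7 colours, 10 balls each, 20 balls drawn.
import random

def monte_carlo(taken: int = 20, trials: int = 100_000) -> float:
    balls_per_colour, num_colours = 10, 7
    urn = [c for c in range(num_colours) for _ in range(balls_per_colour)]
    total = sum(len(set(random.sample(urn, taken))) for _ in range(trials))
    return total / trials  # should hover around 6.818741802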
from math import isqrt def lowerCAmelCase__ ( a__: int ) -> list[int]: '''simple docstring''' _UpperCAmelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , a__ , a__ ): _UpperCAmelCase = False return [i for i in range(2 , a__ ) if is_prime[i]] def lowerCAmelCase__ ( a__: int = 1_0**8 ) -> int: '''simple docstring''' _UpperCAmelCase = calculate_prime_numbers(max_number // 2 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = len(a__ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f'''{solution() = }''')
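# --- Optional cross-check (assumption: naive factor counting, not part of the
# original solution): brute-force the semiprime count below a small bound.
def brute_force(max_number: int = 30) -> int:
    def is_semiprime(n: int) -> bool:
        count, d = 0, 2
        while d * d <= n and count < 3:
            while n % d == 0:
                n //= d
                count += 1
            d += 1
        if n > 1:
            count += 1
        return count == 2

    return sum(is_semiprime(n) for n in range(4, max_number))

# brute_force(30) == 10, matching the ten composites listed in Project Euler 187.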
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ :str = { '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ :Union[str, Any] = [ '''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegatronBertForCausalLM''', '''MegatronBertForMaskedLM''', '''MegatronBertForMultipleChoice''', '''MegatronBertForNextSentencePrediction''', '''MegatronBertForPreTraining''', '''MegatronBertForQuestionAnswering''', '''MegatronBertForSequenceClassification''', '''MegatronBertForTokenClassification''', '''MegatronBertModel''', '''MegatronBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
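# --- Illustrative note: thanks to `_LazyModule`, importing a config from this
# package stays cheap, because the torch-backed modeling module is only loaded
# when one of its attributes is first touched.
from transformers.models.megatron_bert import MegatronBertConfig  # no torch import yet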
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ :Dict = logging.get_logger(__name__) lowerCAmelCase__ :int = { '''microsoft/swinv2-tiny-patch4-window8-256''': ( '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json''' ), } class __a ( UpperCAmelCase ): _a : Optional[Any] = 'swinv2' _a : int = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=96 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=32 , **_SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) ) _UpperCAmelCase = (0, 0, 0, 0)
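# --- Illustrative sketch: the derived hidden size set at the end of __init__
# above (embed_dim doubled once per stage after the first).
from transformers import Swinv2Config

cfg = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
assert cfg.hidden_size == 96 * 2 ** 3  # 768, used by VisionEncoderDecoderModel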
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was sucessfully converted!' 
) if __name__ == "__main__": lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCAmelCase__ :List[str] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
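# --- Illustrative shell invocation for the converter above (the file name and
# paths are placeholders; the flags match the argparse definitions):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./longt5-flax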
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS lowerCAmelCase__ :List[Any] = logging.get_logger(__name__) lowerCAmelCase__ :Tuple = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, '''constant''': get_constant_schedule, '''constant_w_warmup''': get_constant_schedule_with_warmup, } class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if config is None: assert isinstance(self.model , _SCREAMING_SNAKE_CASE ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) _UpperCAmelCase = self.model.config else: _UpperCAmelCase = config _UpperCAmelCase = data_args _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , _SCREAMING_SNAKE_CASE ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' ' padding..' 
) if self.args.label_smoothing == 0: _UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _UpperCAmelCase = label_smoothed_nll_loss def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if self.optimizer is None: _UpperCAmelCase = ['bias', 'LayerNorm.weight'] _UpperCAmelCase = [ { 'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], 'weight_decay': self.args.weight_decay, }, { 'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] _UpperCAmelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _UpperCAmelCase = Adafactor _UpperCAmelCase = {'scale_parameter': False, 'relative_step': False} else: _UpperCAmelCase = AdamW _UpperCAmelCase = { 'betas': (self.args.adam_betaa, self.args.adam_betaa), 'eps': self.args.adam_epsilon, } _UpperCAmelCase = self.args.learning_rate if self.sharded_ddp: _UpperCAmelCase = OSS( params=_SCREAMING_SNAKE_CASE , optim=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) else: _UpperCAmelCase = optimizer_cls(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if self.lr_scheduler is None: _UpperCAmelCase = self._get_lr_scheduler(_SCREAMING_SNAKE_CASE ) else: # ignoring --lr_scheduler logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" _UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _UpperCAmelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _UpperCAmelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_SCREAMING_SNAKE_CASE ) return scheduler def UpperCAmelCase__ ( self ) -> Optional[torch.utils.data.Sampler]: """simple docstring""" if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0] _UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _UpperCAmelCase , _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[:2] else: # compute label smoothed loss _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0] _UpperCAmelCase = torch.nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" _UpperCAmelCase = inputs.pop('labels' ) _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return loss def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """simple docstring""" _UpperCAmelCase = self._prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = { 'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _UpperCAmelCase = self.model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **_SCREAMING_SNAKE_CASE , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs['max_length'] ) _UpperCAmelCase = inputs.pop('labels' ) with torch.no_grad(): # compute loss on predict data _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs['max_length'] ) return (loss, logits, labels) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( 'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be' f''' padded to `max_length`={max_length}''' ) _UpperCAmelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _UpperCAmelCase = tensor return padded_tensor
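# --- Illustrative sketch (assumption: this mirrors the `label_smoothed_nll_loss`
# the trainer imports from the example's utils module; the real helper may
# differ in detail). Given log-probabilities, it blends the NLL term with a
# uniform smoothing term and zeroes out padding positions.
import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    # clamp keeps gather safe where target holds the ignore_index sentinel
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss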
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
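# Usage sketch for the config above (illustrative; the relative imports mean
# this module normally loads as part of the transformers package):
#
#   config = CTRLConfig()
#   config.n_embd              # 1280
#   config.hidden_size         # 1280 as well, resolved through attribute_map
#   config.num_hidden_layers   # 48, aliased to n_layer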
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) from x0 to x_end with the classic fourth-order
    Runge-Kutta method, starting from y(x0) = y0 with step size h."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
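# Usage sketch for the integrator above: dy/dx = y with y(0) = 1 has the
# exact solution e**x, so the final sample should approach numpy's e
# (classic RK4 has O(h**4) global error):
#
#   approx = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
#   approx  # ~2.718281..., close to np.e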
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
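# Minimal usage sketch for the processor exercised above, reusing the COCO
# fixture the slow tests read from disk (the default size assumption,
# shortest_edge=800 / longest_edge=1333, matches the expected shape):
#
#   from PIL import Image
#   from transformers import DeformableDetrImageProcessor
#
#   processor = DeformableDetrImageProcessor()
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="pt")
#   inputs["pixel_values"].shape  # torch.Size([1, 3, 800, 1066]) for this image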
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
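# Usage sketch mirroring the (width, height) flip tested above: older Donut
# configs stored size as (width, height), so tuple inputs are reordered into
# the {"height", "width"} dict convention (a sketch of the from_dict path
# exercised by the test, not necessarily of every constructor path):
#
#   processor = DonutImageProcessor.from_dict(processor_dict, size=(1920, 2560))
#   processor.size  # {"height": 2560, "width": 1920}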
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
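# Usage sketch for the tokenizer under test: it packs artist/genre/lyrics
# metadata into one token sequence per prior level (three for Jukebox), which
# is why the expected outputs above are lists of three tensors:
#
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
#   len(tokens)  # 3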
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was sucessfully converted!' 
)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
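# Hypothetical invocation (the on-disk script name and the paths below are
# assumptions for illustration):
#
#   python convert_longt5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./long-t5-local-base-flax
#
# The dumped folder can then be reloaded with
# FlaxAutoModelForSeq2SeqLM.from_pretrained("./long-t5-local-base-flax").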
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__) def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' , ) _UpperCAmelCase = parser.parse_args() return args def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' def fn(a__: str ): return tokenizer(examples['text'] ) return fn def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for i in range(len(tokenized_data['input_ids'] ) ): _UpperCAmelCase = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _UpperCAmelCase = tf.train.Features(feature=a__ ) _UpperCAmelCase = tf.train.Example(features=a__ ) _UpperCAmelCase = example.SerializeToString() records.append(a__ ) return records def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int: '''simple docstring''' _UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _UpperCAmelCase = min(len(a__ ) , args.limit ) _UpperCAmelCase = dataset.select(range(a__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(a__ ): os.makedirs(a__ ) else: _UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_UpperCAmelCase = tokenize_function(a__ ) _UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(a__: Optional[int] ): # Concatenate all texts. _UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )] for k, t in concatenated_examples.items() } return result _UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 for shard in range(0 , len(a__ ) , args.shard_size ): _UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] _UpperCAmelCase = len(dataset_snapshot['input_ids'] ) _UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _UpperCAmelCase = get_serialized_examples(a__ ) with tf.io.TFRecordWriter(a__ ) as out_file: for i in range(len(a__ ) ): _UpperCAmelCase = serialized_examples[i] out_file.write(a__ ) print('Wrote file {} containing {} records'.format(a__ , a__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f: print(F'''Total {args.split} records: {total_records}''' , file=a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = parse_args() main(args)
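# Read-back sketch for the shards written above (the path is illustrative).
# Every serialized example holds exactly `max_length` tokens, because
# group_texts drops the remainder, so FixedLenFeature with that shape
# decodes them (512 assumes the default --max_length):
#
#   import tensorflow as tf
#
#   feature_description = {
#       "input_ids": tf.io.FixedLenFeature([512], tf.int64),
#       "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
#   }
#   dataset = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
#   dataset = dataset.map(lambda rec: tf.io.parse_single_example(rec, feature_description))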
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Density of the normal distribution N(mu, sigma**2) evaluated at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
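# Sanity-check sketch: the density should integrate to ~1 over a wide window
# (gaussian is vectorized because exp/sqrt come from numpy):
#
#   import numpy as np
#
#   xs = np.linspace(-10, 10, 100_001)
#   np.trapz(gaussian(xs), xs)  # ~1.0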
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
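# Reference sketch for the warmup schedules asserted above: a linear schedule
# with 2 warmup steps out of 10 ramps 0 -> 10, then decays linearly to 0,
# which is exactly the expected list in the test:
#
#   import torch
#   from transformers import AdamW, get_linear_schedule_with_warmup
#
#   optimizer = AdamW(torch.nn.Linear(50, 50).parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   lrs = []
#   for _ in range(10):
#       lrs.append(scheduler.get_lr()[0])
#       scheduler.step()
#   # lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]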
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def lowerCAmelCase__ ( a__: Optional[int] ) -> Any: # picklable for multiprocessing '''simple docstring''' return x.sum() def lowerCAmelCase__ ( a__: Optional[Any] ) -> Any: # picklable for multiprocessing '''simple docstring''' return i + 1 @dataclass class __a : _a : int _a : str class __a ( UpperCAmelCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = [1, 2] _UpperCAmelCase = {'a': 1, 'b': 2} _UpperCAmelCase = {'a': [1, 2], 'b': [3, 4]} _UpperCAmelCase = {'a': {'1': 1}, 'b': 2} _UpperCAmelCase = {'a': 1, 'b': 2, 'c': 3, 'd': 4} _UpperCAmelCase = {} _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = [2, 3] _UpperCAmelCase = {'a': 2, 'b': 3} _UpperCAmelCase = {'a': [2, 3], 'b': [4, 5]} _UpperCAmelCase = {'a': {'1': 2}, 'b': 3} _UpperCAmelCase = {'a': 2, 'b': 3, 'c': 4, 'd': 5} self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 2 self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )} _UpperCAmelCase = {'a': 2, 'b': 0, 'c': 2} _UpperCAmelCase = { 'a': np.eye(2 ).astype(_SCREAMING_SNAKE_CASE ), 'b': np.zeros(3 ).astype(_SCREAMING_SNAKE_CASE ), 'c': np.ones(2 ).astype(_SCREAMING_SNAKE_CASE ), } self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
map_numpy=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): # can't pickle a local lambda map_nested(lambda _SCREAMING_SNAKE_CASE : x + 1 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = {'a': 1, 'b': 2} _UpperCAmelCase = {'a': 3, 'b': 4} _UpperCAmelCase = {'a': 5, 'b': 6} _UpperCAmelCase = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> int: """simple docstring""" class __a : _a : List[Any] = 'bar' _UpperCAmelCase = Foo() self.assertEqual(foo.my_attr , 'bar' ) with temporary_assignment(_SCREAMING_SNAKE_CASE , 'my_attr' , 'BAR' ): self.assertEqual(foo.my_attr , 'BAR' ) self.assertEqual(foo.my_attr , 'bar' ) @pytest.mark.parametrize( 'iterable_length, num_proc, expected_num_proc' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (1_6, 1_6, 1_6), (1_6, 1_7, 1_6), (1_7, 1_6, 1_6), ] , ) def lowerCAmelCase__ ( a__: Union[str, Any] , a__: Any , a__: Union[str, Any] ) -> Tuple: '''simple docstring''' with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch( 'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool: _UpperCAmelCase = {F'''{i}''': i for i in range(a__ )} _UpperCAmelCase = map_nested(lambda a__ : x + 1_0 , a__ , num_proc=a__ , parallel_min_length=1_6 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class __a ( UpperCAmelCase ): @require_tf def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import tensorflow as tf from tensorflow.keras import layers _UpperCAmelCase = layers.Dense(2 ) def gen_random_output(): _UpperCAmelCase = tf.random.uniform((1, 3) ) return model(_SCREAMING_SNAKE_CASE ).numpy() with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = gen_random_output() with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def UpperCAmelCase__ ( self ) -> int: """simple docstring""" import torch def gen_random_output(): _UpperCAmelCase = torch.nn.Linear(3 , 2 ) _UpperCAmelCase = torch.rand(1 , 3 ) return model(_SCREAMING_SNAKE_CASE ).detach().numpy() with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = gen_random_output() with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ): 
_UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): _UpperCAmelCase = gen_random_output() with temp_seed(42 ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('input_data' , [{}] ) def lowerCAmelCase__ ( a__: List[Any] ) -> List[str]: '''simple docstring''' _UpperCAmelCase = NestedDataStructure(a__ ).data assert output_data == input_data @pytest.mark.parametrize( 'data, expected_output' , [ ({}, []), ([], []), ('foo', ['foo']), (['foo', 'bar'], ['foo', 'bar']), ([['foo', 'bar']], ['foo', 'bar']), ([[['foo'], ['bar']]], ['foo', 'bar']), ([[['foo'], 'bar']], ['foo', 'bar']), ({'a': 1, 'b': 2}, [1, 2]), ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]), ({'a': {'1': 1}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': [2]}, [1, 2]), ] , ) def lowerCAmelCase__ ( a__: str , a__: Optional[int] ) -> int: '''simple docstring''' _UpperCAmelCase = NestedDataStructure(a__ ).flatten() assert output == expected_output def lowerCAmelCase__ ( ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = A(x=1 , y='foobar' ) _UpperCAmelCase = {'x': 1, 'y': 'foobar'} assert asdict(a__ ) == expected_output _UpperCAmelCase = {'a': {'b': A(x=1_0 , y='foo' )}, 'c': [A(x=2_0 , y='bar' )]} _UpperCAmelCase = {'a': {'b': {'x': 1_0, 'y': 'foo'}}, 'c': [{'x': 2_0, 'y': 'bar'}]} assert asdict(a__ ) == expected_output with pytest.raises(a__ ): asdict([1, A(x=1_0 , y='foo' )] ) def lowerCAmelCase__ ( a__: str ) -> List[str]: '''simple docstring''' return text.split() def lowerCAmelCase__ ( a__: Any ) -> int: '''simple docstring''' yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def lowerCAmelCase__ ( ) -> str: '''simple docstring''' with Pool(2 ) as pool: _UpperCAmelCase = list(iflatmap_unordered(a__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) ) assert out.count('hello' ) == 1_0 assert out.count('there' ) == 1_0 assert len(a__ ) == 2_0 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _UpperCAmelCase = list(iflatmap_unordered(a__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) ) assert out.count('hello' ) == 1_0 assert out.count('there' ) == 1_0 assert len(a__ ) == 2_0 # check that we get items as fast as possible with Pool(2 ) as pool: _UpperCAmelCase = [] for yield_time, content in iflatmap_unordered( a__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(a__ ) assert out.count('a' ) == 2 assert out.count('b' ) == 2 assert len(a__ ) == 4
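# Usage sketch for map_nested, the workhorse these tests exercise: it applies
# a function through arbitrarily nested lists and dicts, optionally with
# multiprocessing:
#
#   from datasets.utils.py_utils import map_nested
#
#   map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}})
#   # {"a": [2, 3], "b": {"c": 4}}
#
# With num_proc > 1 the function must be picklable (no local lambdas),
# exactly as the assertRaises case above checks.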
329
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
329
1
def lowerCAmelCase__ ( a__: list[list[float]] ) -> list[list[float]]:
    '''simple docstring'''
    _UpperCAmelCase = []
    for data in source_data:
        for i, el in enumerate(a__ ):
            if len(a__ ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(a__ ) )
    return data_lists


def lowerCAmelCase__ ( a__: list[list[float]] , a__: list[int] ) -> list[list[float]]:
    '''simple docstring'''
    _UpperCAmelCase = []
    for dlist, weight in zip(a__ , a__ ):
        _UpperCAmelCase = min(a__ )
        _UpperCAmelCase = max(a__ )
        _UpperCAmelCase = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            _UpperCAmelCase = F'''Invalid weight of {weight:f} provided'''
            raise ValueError(a__ )
        score_lists.append(a__ )
    return score_lists


def lowerCAmelCase__ ( a__: list[list[float]] ) -> list[float]:
    '''simple docstring'''
    _UpperCAmelCase = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(a__ ):
            _UpperCAmelCase = final_scores[j] + ele
    return final_scores


def lowerCAmelCase__ ( a__: list[list[float]] , a__: list[int] ) -> list[list[float]]:
    '''simple docstring'''
    _UpperCAmelCase = get_data(a__ )
    _UpperCAmelCase = calculate_each_score(a__ , a__ )
    _UpperCAmelCase = generate_final_scores(a__ )

    # append scores to source data
    for i, ele in enumerate(a__ ):
        source_data[i].append(a__ )
    return source_data
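A self-contained sketch of the weighted scoring scheme implemented above, with readable names chosen purely for illustration: each column is min-max normalized, a column with weight 0 rewards small values and a column with weight 1 rewards large values, and the per-column scores are summed per row.

def weighted_scores(rows: list[list[float]], weights: list[int]) -> list[float]:
    columns = list(zip(*rows))  # transpose rows into columns
    totals = [0.0] * len(rows)
    for column, weight in zip(columns, weights):
        lo, hi = min(column), max(column)
        span = hi - lo
        for i, value in enumerate(column):
            normalized = (value - lo) / span if span else 0.0
            totals[i] += (1 - normalized) if weight == 0 else normalized
    return totals

assert weighted_scores([[20, 60], [25, 90]], [0, 0]) == [2.0, 0.0]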
329
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
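The classes above lazily wrap datasets and data loaders for pipelines. A minimal sketch of the dataset-wrapping pattern (illustrative names; assuming torch is installed — the real classes live in transformers.pipelines.pt_utils):

from torch.utils.data import Dataset

class ProcessedDataset(Dataset):
    # Applies a preprocessing function lazily, one item at a time.
    def __init__(self, dataset, process, **params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.process(self.dataset[i], **self.params)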
329
1
def lowerCAmelCase__ ( a__: str ) -> list:
    '''simple docstring'''
    _UpperCAmelCase = [0] * len(a__ )

    for i in range(1 , len(a__ ) ):
        # use last results for better performance - dynamic programming
        _UpperCAmelCase = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            _UpperCAmelCase = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1

        _UpperCAmelCase = j
    return prefix_result


def lowerCAmelCase__ ( a__: str ) -> int:
    '''simple docstring'''
    return max(prefix_function(a__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
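For reference, a brief sketch of the same prefix-function (the KMP failure table) with readable names; the helper below is an illustrative rewrite, not part of the source file:

def prefix_function(s: str) -> list[int]:
    # table[i] = length of the longest proper prefix of s[: i + 1] that is also its suffix
    table = [0] * len(s)
    for i in range(1, len(s)):
        j = table[i - 1]
        while j > 0 and s[i] != s[j]:
            j = table[j - 1]
        if s[i] == s[j]:
            j += 1
        table[i] = j
    return table

assert prefix_function('aabaaab') == [0, 1, 0, 1, 2, 2, 3]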
329
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase__ :int = logging.get_logger(__name__)

lowerCAmelCase__ :Optional[Any] = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}


class __a ( UpperCAmelCase ):
    _a : str = 'data2vec-text'

    def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
        """simple docstring"""
        super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = hidden_size
        _UpperCAmelCase = num_hidden_layers
        _UpperCAmelCase = num_attention_heads
        _UpperCAmelCase = hidden_act
        _UpperCAmelCase = intermediate_size
        _UpperCAmelCase = hidden_dropout_prob
        _UpperCAmelCase = attention_probs_dropout_prob
        _UpperCAmelCase = max_position_embeddings
        _UpperCAmelCase = type_vocab_size
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = layer_norm_eps
        _UpperCAmelCase = position_embedding_type
        _UpperCAmelCase = use_cache
        _UpperCAmelCase = classifier_dropout


class __a ( UpperCAmelCase ):
    @property
    def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            _UpperCAmelCase = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
329
1
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


lowerCAmelCase__ :Optional[Any] = '''sshleifer/bart-tiny-random'''
lowerCAmelCase__ :Any = '''patrickvonplaten/t5-tiny-random'''


@require_torch
class __a ( unittest.TestCase ):
    @cached_property
    def UpperCAmelCase__ ( self ) -> List[str]:
        """simple docstring"""
        return AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self ) -> int:
        """simple docstring"""
        _UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def UpperCAmelCase__ ( self ) -> Optional[int]:
        """simple docstring"""
        _UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=_SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self ) -> Optional[Any]:
        """simple docstring"""
        _UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=_SCREAMING_SNAKE_CASE )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def UpperCAmelCase__ ( self ) -> Dict:
        """simple docstring"""
        _UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def UpperCAmelCase__ ( self ) -> Dict:
        """simple docstring"""
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            create_student_by_copying_alternating_layers(_SCREAMING_SNAKE_CASE , tempfile.mkdtemp() , e=_SCREAMING_SNAKE_CASE , d=_SCREAMING_SNAKE_CASE )
329
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
329
1
from __future__ import annotations

from typing import TypedDict


class __a ( UpperCAmelCase ):
    _a : str
    _a : int


def lowerCAmelCase__ ( a__: str ) -> list[str]:
    '''simple docstring'''
    if not isinstance(a__ , a__ ):
        raise TypeError('The parameter s type must be str.' )

    return [s[i:] + s[:i] for i in range(len(a__ ) )]


def lowerCAmelCase__ ( a__: str ) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(a__ , a__ ):
        raise TypeError('The parameter s type must be str.' )
    if not s:
        raise ValueError('The parameter s must not be empty.' )

    _UpperCAmelCase = all_rotations(a__ )
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    _UpperCAmelCase = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(a__ ),
    }
    return response


def lowerCAmelCase__ ( a__: str , a__: int ) -> str:
    '''simple docstring'''
    if not isinstance(a__ , a__ ):
        raise TypeError('The parameter bwt_string type must be str.' )
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.' )
    try:
        _UpperCAmelCase = int(a__ )
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or castable to int.' )
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.' )
    if idx_original_string >= len(a__ ):
        raise ValueError(
            'The parameter idx_original_string must be lower than len(bwt_string).' )

    _UpperCAmelCase = [''] * len(a__ )
    for _ in range(len(a__ ) ):
        for i in range(len(a__ ) ):
            _UpperCAmelCase = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    lowerCAmelCase__ :int = '''Provide a string that I will generate its BWT transform: '''
    lowerCAmelCase__ :int = input(entry_msg).strip()
    lowerCAmelCase__ :Any = bwt_transform(s)
    print(
        f'''Burrows Wheeler transform for string \'{s}\' results '''
        f'''in \'{result['bwt_string']}\''''
    )
    lowerCAmelCase__ :List[Any] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
        f'''we get original string \'{original_string}\''''
    )
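A small round-trip sketch of the Burrows-Wheeler transform implemented above, rewritten with readable names purely for illustration:

def bwt(s: str) -> tuple[str, int]:
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return ''.join(rotation[-1] for rotation in rotations), rotations.index(s)

def inverse_bwt(last_column: str, idx: int) -> str:
    rows = [''] * len(last_column)
    for _ in range(len(last_column)):
        # prepend the last column and re-sort; after n rounds, rows hold the sorted rotations
        rows = sorted(last_column[i] + rows[i] for i in range(len(last_column)))
    return rows[idx]

transformed, index = bwt('banana')
assert (transformed, index) == ('nnbaaa', 3)
assert inverse_bwt(transformed, index) == 'banana'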
329
from collections.abc import Generator


def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
    '''simple docstring'''
    _UpperCAmelCase , _UpperCAmelCase = 0, 1
    while True:
        _UpperCAmelCase , _UpperCAmelCase = b, a + b
        yield b


def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    _UpperCAmelCase = 1
    _UpperCAmelCase = fibonacci_generator()
    while len(str(next(a__ ) ) ) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
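A compact sketch of the same search (Project Euler problem 25), with illustrative names: it returns the index of the first Fibonacci term whose decimal representation has n digits.

def first_fib_with_n_digits(n: int) -> int:
    a, b, index = 1, 1, 2  # F(1) = F(2) = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_with_n_digits(3) == 12  # F(12) = 144 is the first three-digit term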
329
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=UpperCAmelCase )
class __a ( UpperCAmelCase ):
    _a : str = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
    _a : ClassVar[Features] = Features({'text': Value('string' )} )
    _a : ClassVar[Features] = Features({} )
    _a : str = "text"

    @property
    def UpperCAmelCase__ ( self ) -> Dict[str, str]:
        """simple docstring"""
        return {self.text_column: "text"}
329
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 30} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
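For orientation, a short usage sketch of the image processor under test (assuming transformers and Pillow are installed; the call pattern mirrors the conversion script earlier in this document):

from PIL import Image
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor(crop_pct=0.9)
image = Image.new('RGB', (64, 64))
pixel_values = processor(images=image, return_tensors='pt').pixel_values
print(pixel_values.shape)  # (1, num_channels, crop_height, crop_width)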
329
1
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class __a ( UpperCAmelCase ):
    def __lt__( self , _SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        return self[-1] < other[-1]

    def __eq__( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        return self[-1] == other[-1]


def lowerCAmelCase__ ( a__: list ) -> list:
    '''simple docstring'''
    _UpperCAmelCase = []
    # sort into stacks
    for element in collection:
        _UpperCAmelCase = Stack([element] )
        _UpperCAmelCase = bisect_left(a__ , a__ )
        if i != len(a__ ):
            stacks[i].append(a__ )
        else:
            stacks.append(a__ )

    # use a heap-based merge to merge stacks efficiently
    _UpperCAmelCase = merge(*(reversed(a__ ) for stack in stacks) )
    return collection


if __name__ == "__main__":
    lowerCAmelCase__ :Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCAmelCase__ :Any = [int(item) for item in user_input.split(''',''')]
    print(patience_sort(unsorted))
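A condensed sketch of the patience-sort strategy above with readable names (illustrative only): pile tops stay sorted, each element lands on the leftmost pile whose top is at least as large, and a k-way heap merge recombines the descending piles.

from bisect import bisect_left
from heapq import merge

def patience_sort_sketch(values: list[int]) -> list[int]:
    piles: list[list[int]] = []
    for value in values:
        i = bisect_left([pile[-1] for pile in piles], value)
        if i == len(piles):
            piles.append([value])  # start a new pile
        else:
            piles[i].append(value)  # place on an existing pile
    # each reversed pile is ascending, so heapq.merge can combine them
    return list(merge(*(reversed(pile) for pile in piles)))

assert patience_sort_sketch([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]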
329
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
329
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
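The key-renaming helper above re-numbers encoder blocks by subtracting a running patch-embedding offset from the original block index; a minimal standalone sketch of that idea (the function name here is hypothetical):

```python
def renumber_block(key: str, original_name: str, new_name: str, offset: int) -> str:
    parts = key.split(".")
    anchor = original_name.split(".")[0]                 # e.g. "mlp" for "mlp.fc1"
    orig_block = int(parts[parts.index(anchor) - 2])
    layer = int(parts[parts.index(anchor) - 1])
    new_block = orig_block - offset                      # patch embeddings shift the block index
    return key.replace(f"{orig_block}.{layer}.{original_name}", f"block.{new_block}.{layer}.{new_name}")

# renumber_block("poolformer.encoder.2.0.mlp.fc1.weight", "mlp.fc1", "output.conv1", offset=1)
# -> "poolformer.encoder.block.1.0.output.conv1.weight"
```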
329
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
329
1
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def clear_device_cache():
    """Run garbage collection and empty the cache of whichever accelerator is present."""
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()


def release_memory(*objects):
    """Set each object to None, free accelerator memory, and return the None-ed list."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    clear_device_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """True if `exception` is a CUDA/CPU out-of-memory or CUDNN-not-supported error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever an OOM error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        clear_device_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator supplies the first argument itself
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    clear_device_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
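The decorator above is exposed publicly; a minimal usage sketch, assuming the `accelerate.utils.find_executable_batch_size` export:

```python
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # `batch_size` is injected by the decorator and halved after every OOM
    print(f"trying batch_size={batch_size}")

train()  # note: called *without* the batch-size argument
```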
0
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
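The URL format asserted by the test can be reproduced with a tiny standalone builder (a hypothetical helper, not the `datasets` API):

```python
from urllib.parse import quote

def build_dataset_url(repo_id: str, path: str, revision: str | None = None) -> str:
    # mirrors the format checked above; blanks in `path` are percent-encoded
    return f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"

assert build_dataset_url("org-name/dataset-name", "filename with blanks.csv") \
    == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"
```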
329
0
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]: '''simple docstring''' model.train() UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict: '''simple docstring''' set_seed(42 ) UpperCAmelCase_ = RegressionModel() UpperCAmelCase_ = deepcopy(snake_case_ ) UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( snake_case_ : Any ) -> int: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , 
snake_case_ , snake_case_ , snake_case_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1): # 
Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] GradientState._reset_state() def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ )) if accelerator.num_processes > 1: check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ = RegressionDataset(length=96 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if iteration < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if batch_num < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert 
accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(snake_case_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(snake_case_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(snake_case_ , snake_case_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Dict ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
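Outside the test harness, the `accumulate` context manager exercised above reduces to a short training loop; a minimal runnable sketch:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(32, 1), torch.randn(32, 1)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # gradients are synced only every 2nd step, or at the end of the dataloader
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```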
1
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg: str, hint: str = None) -> None:
    require_version(deps[pkg], hint)
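`require_version` (imported above) can also be called directly; it raises with the hint appended when the installed version does not satisfy the spec:

```python
from transformers.utils.versions import require_version

require_version("tokenizers>=0.13", "pip install -U tokenizers")
```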
329
0
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" lowercase__ = int(number**0.5 ) return number == sq * sq def _SCREAMING_SNAKE_CASE (A , A , A , A , A , A ) -> tuple[int, int]: """simple docstring""" lowercase__ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den lowercase__ = x_den * y_den * z_den lowercase__ = gcd(A , A ) top //= hcf bottom //= hcf return top, bottom def _SCREAMING_SNAKE_CASE (A = 35 ) -> int: """simple docstring""" lowercase__ = set() lowercase__ = 42 lowercase__ = Fraction(0 ) lowercase__ = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 lowercase__ = x_num * y_den + x_den * y_num lowercase__ = x_den * y_den lowercase__ = gcd(A , A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__ = add_three( A , A , A , A , A , A ) unique_s.add(A ) # n=2 lowercase__ = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) lowercase__ = x_den * x_den * y_den * y_den if is_sq(A ) and is_sq(A ): lowercase__ = int(sqrt(A ) ) lowercase__ = int(sqrt(A ) ) lowercase__ = gcd(A , A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__ = add_three( A , A , A , A , A , A ) unique_s.add(A ) # n=-1 lowercase__ = x_num * y_num lowercase__ = x_den * y_num + x_num * y_den lowercase__ = gcd(A , A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__ = add_three( A , A , A , A , A , A ) unique_s.add(A ) # n=2 lowercase__ = x_num * x_num * y_num * y_num lowercase__ = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(A ) and is_sq(A ): lowercase__ = int(sqrt(A ) ) lowercase__ = int(sqrt(A ) ) lowercase__ = gcd(A , A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__ = add_three( A , A , A , A , A , A ) unique_s.add(A ) for num, den in unique_s: total += Fraction(A , A ) return total.denominator + total.numerator if __name__ == "__main__": print(f"""{solution() = }""")
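The four enumeration branches search for reduced fractions $x$, $y$, $z$ related by $z^n$-identities for $n \in \{1, 2, -1, -2\}$ (note that the last branch, although commented `# n=2`, implements the $n = -2$ case):

```latex
z = x + y \ (n{=}1), \qquad
z^{2} = x^{2} + y^{2} \ (n{=}2), \qquad
\tfrac{1}{z} = \tfrac{1}{x} + \tfrac{1}{y} \ (n{=}{-}1), \qquad
\tfrac{1}{z^{2}} = \tfrac{1}{x^{2}} + \tfrac{1}{y^{2}} \ (n{=}{-}2)
```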
2
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search over an adjacency-list graph."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop the last element instead of the first one
        # 2) add adjacent elements to the stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
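For contrast with the two differences called out in the comments, the queue-based BFS counterpart over the same graph:

```python
from collections import deque

def breadth_first_search(graph: dict, start: str) -> set[str]:
    # FIFO queue: pop from the front, and mark nodes *before* enqueueing them
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored

# breadth_first_search(G, "A") visits the same node set as the DFS above, in level order
```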
329
0
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
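A short worked trace of the iteration, each step replacing `(a, b)` with `(b, a % b)`:

```python
# gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> 6
assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6
```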
3
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
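Every test above repeats the same data-parallel setup; condensed here, assuming `pipeline`, `params`, `prompt`, `num_samples`, and `num_inference_steps` as defined in those tests:

```python
import jax
from flax.jax_utils import replicate
from flax.training.common_utils import shard

prompt_ids = pipeline.prepare_inputs(num_samples * [prompt])   # tokenize on the host
params = replicate(params)                                     # copy weights to every device
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
prompt_ids = shard(prompt_ids)                                 # split the batch across devices
images = pipeline(prompt_ids, params, rng, num_inference_steps, jit=True).images
```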
329
0
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
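Instantiating the shim triggers the warning; a sketch assuming the default constructor needs no arguments (it accepts the same kwargs as `ImageGPTImageProcessor`):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ImageGPTFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)
```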
4
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ :int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} if prompt is not None: _UpperCAmelCase = prompt if generate_kwargs is not None: _UpperCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _UpperCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) _UpperCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. ''' 'Note also that one single text can be provided for conditional image to text generation.' 
) _UpperCAmelCase = self.model.config.model_type if model_type == "git": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _UpperCAmelCase = None return model_inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs['input_ids'] ) ): _UpperCAmelCase = None if generate_kwargs is None: _UpperCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _UpperCAmelCase = model_inputs.pop(self.model.main_input_name ) _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs: _UpperCAmelCase = { 'generated_text': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records
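End to end, this class is reached through `transformers.pipeline`; the caption below is illustrative output, not a guaranteed result:

```python
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
result = captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
# e.g. [{'generated_text': 'two cats laying on a blanket'}]  (illustrative)
```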
329
0
import argparse from collections import defaultdict import yaml UpperCAmelCase__ = '''docs/source/en/_toctree.yml''' def UpperCAmelCase_ ( __snake_case ) -> Dict: """simple docstring""" _lowercase =defaultdict(__snake_case ) for doc in model_doc: counts[doc["local"]] += 1 _lowercase =[key for key, value in counts.items() if value > 1] _lowercase =[] for duplicate_key in duplicates: _lowercase =list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(__snake_case ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(__snake_case , key=lambda __snake_case : s["title"].lower() ) def UpperCAmelCase_ ( __snake_case=False ) -> Optional[int]: """simple docstring""" with open(__snake_case , encoding='''utf-8''' ) as f: _lowercase =yaml.safe_load(f.read() ) # Get to the API doc _lowercase =0 while content[api_idx]["title"] != "API": api_idx += 1 _lowercase =content[api_idx]['''sections'''] # Then to the model doc _lowercase =0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 _lowercase =api_doc[model_idx]['''sections'''] _lowercase =[(idx, section) for idx, section in enumerate(__snake_case ) if '''sections''' in section] _lowercase =False for idx, modality_doc in modalities_docs: _lowercase =modality_doc['''sections'''] _lowercase =clean_model_doc_toc(__snake_case ) if old_modality_doc != new_modality_doc: _lowercase =True if overwrite: _lowercase =new_modality_doc if diff: if overwrite: _lowercase =model_doc _lowercase =api_doc with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__snake_case , allow_unicode=__snake_case ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') UpperCAmelCase__ = parser.parse_args() check_model_doc(args.fix_and_overwrite)
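The duplicate-collapsing step of `clean_model_doc_toc` on toy data (titles are hypothetical):

```python
from collections import defaultdict

toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/gpt2", "title": "GPT-2"},
]
counts = defaultdict(int)
for doc in toc:
    counts[doc["local"]] += 1

deduped = [doc for doc in toc if counts[doc["local"]] == 1]   # keep non-duplicates
for key in (k for k, v in counts.items() if v > 1):
    titles = list({doc["title"] for doc in toc if doc["local"] == key})
    assert len(titles) == 1, f"{key} appears with conflicting titles"
    deduped.append({"local": key, "title": titles[0]})        # add each duplicate once

deduped = sorted(deduped, key=lambda d: d["title"].lower())
```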
5
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowerCAmelCase__ ( *a__: str , a__: Optional[Union[Dict, Any]] = None , a__: Dict=True , a__: Any=2 ) -> Union[str, Any]: '''simple docstring''' from .. import __version__ _UpperCAmelCase = take_from _UpperCAmelCase = () if not isinstance(args[0] , a__ ): _UpperCAmelCase = (args,) for attribute, version_name, message in args: if version.parse(version.parse(a__ ).base_version ) >= version.parse(a__ ): raise ValueError( F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'''' F''' version {__version__} is >= {version_name}''' ) _UpperCAmelCase = None if isinstance(a__ , a__ ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(a__ ),) _UpperCAmelCase = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.''' elif hasattr(a__ , a__ ): values += (getattr(a__ , a__ ),) _UpperCAmelCase = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.''' elif deprecated_kwargs is None: _UpperCAmelCase = F'''`{attribute}` is deprecated and will be removed in version {version_name}.''' if warning is not None: _UpperCAmelCase = warning + ' ' if standard_warn else '' warnings.warn(warning + message , a__ , stacklevel=a__ ) if isinstance(a__ , a__ ) and len(a__ ) > 0: _UpperCAmelCase = inspect.getouterframes(inspect.currentframe() )[1] _UpperCAmelCase = call_frame.filename _UpperCAmelCase = call_frame.lineno _UpperCAmelCase = call_frame.function _UpperCAmelCase , _UpperCAmelCase = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' ) if len(a__ ) == 0: return elif len(a__ ) == 1: return values[0] return values
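Typical call-site usage, assuming the function is exported as `diffusers.utils.deprecate` (where this module lives upstream):

```python
from diffusers.utils import deprecate

kwargs = {"scale": 0.5}
# warns that `scale` disappears in 1.0.0 and pops it from kwargs
scale = deprecate("scale", "1.0.0", "Use `strength` instead.", take_from=kwargs)
assert scale == 0.5 and "scale" not in kwargs
```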
329
0
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class __A( nn.Module ): def __init__( self , _snake_case = 16 , _snake_case = 88 , _snake_case = None , _snake_case = 1 , _snake_case = 0.0 , _snake_case = 32 , _snake_case = None , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = "geglu" , _snake_case = None , ) -> Any: '''simple docstring''' super().__init__() __a = nn.ModuleList( [ TransformeraDModel( num_attention_heads=_snake_case , attention_head_dim=_snake_case , in_channels=_snake_case , num_layers=_snake_case , dropout=_snake_case , norm_num_groups=_snake_case , cross_attention_dim=_snake_case , attention_bias=_snake_case , sample_size=_snake_case , num_vector_embeds=_snake_case , activation_fn=_snake_case , num_embeds_ada_norm=_snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference __a = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` __a = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` __a = [1, 0] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case = True , ) -> Tuple: '''simple docstring''' __a = hidden_states __a = [] __a = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens __a = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] __a = self.transformer_index_for_condition[i] __a = self.transformers[transformer_index]( _snake_case , encoder_hidden_states=_snake_case , timestep=_snake_case , cross_attention_kwargs=_snake_case , return_dict=_snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] __a = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) __a = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=_snake_case )
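The final blending step of `forward` in isolation (tensor shapes are illustrative):

```python
import torch

mix_ratio = 0.5
residual_text = torch.randn(1, 77, 64)    # transformer output minus its input, text condition
residual_image = torch.randn(1, 77, 64)   # same, image-embedding condition
input_states = torch.randn(1, 77, 64)

output_states = residual_text * mix_ratio + residual_image * (1 - mix_ratio) + input_states
```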
6
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls drawn from the urn."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
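The one-liner is linearity of expectation: each of the 7 colours is entirely absent from a 20-ball draw with probability $\binom{60}{20}/\binom{70}{20}$, hence

```latex
\mathbb{E}[\text{distinct colours}] \;=\; 7\left(1 - \frac{\binom{60}{20}}{\binom{70}{20}}\right) \;\approx\; 6.818741802
```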
329
0
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=1024 ) -> Union[str, Any]: '''simple docstring''' A__ , A__ = [], [] A__ = list(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) A__ , A__ = sorted_examples[0] def is_too_big(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tok(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): A__ = new_src + ' ' + src A__ = new_tgt + ' ' + tgt if is_too_big(SCREAMING_SNAKE_CASE__ ) or is_too_big(SCREAMING_SNAKE_CASE__ ): # cant fit, finalize example finished_src.append(SCREAMING_SNAKE_CASE__ ) finished_tgt.append(SCREAMING_SNAKE_CASE__ ) A__ , A__ = src, tgt else: # can fit, keep adding A__ , A__ = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(SCREAMING_SNAKE_CASE__ ) finished_tgt.append(SCREAMING_SNAKE_CASE__ ) return finished_src, finished_tgt def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: '''simple docstring''' A__ = Path(SCREAMING_SNAKE_CASE__ ) save_path.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) for split in ["train"]: A__ , A__ = data_dir / f'{split}.source', data_dir / f'{split}.target' A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()] A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()] A__ , A__ = pack_examples(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print(f'packed {split} split from {len(SCREAMING_SNAKE_CASE__ )} examples -> {len(SCREAMING_SNAKE_CASE__ )}.' ) Path(save_path / f'{split}.source' ).open('w' ).write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) Path(save_path / f'{split}.target' ).open('w' ).write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) for split in ["val", "test"]: A__ , A__ = data_dir / f'{split}.source', data_dir / f'{split}.target' shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'{split}.source' ) shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'{split}.target' ) def _snake_case( ) -> Optional[int]: '''simple docstring''' A__ = argparse.ArgumentParser() parser.add_argument('--tok_name' , type=SCREAMING_SNAKE_CASE__ , help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('--max_seq_len' , type=SCREAMING_SNAKE_CASE__ , default=128 ) parser.add_argument('--data_dir' , type=SCREAMING_SNAKE_CASE__ ) parser.add_argument('--save_path' , type=SCREAMING_SNAKE_CASE__ ) A__ = parser.parse_args() A__ = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(SCREAMING_SNAKE_CASE__ , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
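A usage sketch for the packing helper, assuming it is exposed as `pack_examples`, the name its own call sites use:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
src = ["first short source.", "second short source.", "a third one."]
tgt = ["first target.", "second target.", "third target."]
# neighbouring pairs are greedily concatenated until one more would exceed max_tokens
packed_src, packed_tgt = pack_examples(tok, src, tgt, max_tokens=1024)
```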
7
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
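With the lazy module in place, the public names resolve on first attribute access; a usage sketch (config values are illustrative and `torch` must be installed):

```python
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
model = MegatronBertModel(config)
```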
329
0
import pytest import datasets # Import fixture modules as plugins lowerCAmelCase_ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? snake_case_ = tmp_path_factory.getbasetemp() / '''cache''' snake_case_ = test_hf_cache_home / '''datasets''' snake_case_ = test_hf_cache_home / '''metrics''' snake_case_ = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(SCREAMING_SNAKE_CASE__ ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(SCREAMING_SNAKE_CASE__ ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(SCREAMING_SNAKE_CASE__ ) ) snake_case_ = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(SCREAMING_SNAKE_CASE__ ) ) snake_case_ = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(SCREAMING_SNAKE_CASE__ ) ) @pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ , scope='''session''' ) def __SCREAMING_SNAKE_CASE (): datasets.disable_progress_bar() @pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # don't take tests into account when counting downloads monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , SCREAMING_SNAKE_CASE__ ) @pytest.fixture def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , SCREAMING_SNAKE_CASE__ )
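The same cache-isolation trick as a minimal standalone fixture (the attribute path is taken from the code above):

```python
import pytest

@pytest.fixture(autouse=True)
def isolated_datasets_cache(monkeypatch, tmp_path):
    # every test in the module now writes to its own temporary cache
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(tmp_path / "datasets"))
```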
8
import argparse

from tax import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def convert_tax_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    '''Convert a T5X checkpoint into a Flax LongT5/T5 model.'''
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
    if config.model_type == "t5":
        _UpperCAmelCase = 'SelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        _UpperCAmelCase = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        _UpperCAmelCase = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with'
            ' `encoder_attention_type` attribute with a value from [\'local\', \'transient-global\'].'
        )
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f'layers_{str(layer_index)}'
        # Self-Attention
        tax_attention_key = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        tax_attention_out = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        tax_attention_query = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        tax_attention_value = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        # Layer Normalization
        tax_attention_layer_norm = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning (the Flax-side parameter paths were anonymized to `_UpperCAmelCase` in this dump)
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
        _UpperCAmelCase = tax_attention_key
        _UpperCAmelCase = tax_attention_out
        _UpperCAmelCase = tax_attention_query
        _UpperCAmelCase = tax_attention_value
        _UpperCAmelCase = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            _UpperCAmelCase = tax_global_layer_norm
        if split_mlp_wi:
            _UpperCAmelCase = tax_mlp_wi_0
            _UpperCAmelCase = tax_mlp_wi_1
        else:
            _UpperCAmelCase = tax_mlp_wi
        _UpperCAmelCase = tax_mlp_wo
        _UpperCAmelCase = tax_mlp_layer_norm
        flax_model.params['encoder']['block'][str(layer_index)]['layer'] = flax_model_encoder_layer_block
    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    _UpperCAmelCase = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        _UpperCAmelCase = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model['target']['encoder']['encoder_norm']['scale']
    _UpperCAmelCase = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f'layers_{str(layer_index)}'
        # Self-Attention
        tax_attention_key = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        tax_attention_out = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        tax_attention_query = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        tax_attention_value = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm']['scale']
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['key']['kernel']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['out']['kernel']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['query']['kernel']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['value']['kernel']
        # Layer Normalization
        tax_cross_layer_norm = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning (Flax-side targets anonymized in this dump)
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
        _UpperCAmelCase = tax_attention_key
        _UpperCAmelCase = tax_attention_out
        _UpperCAmelCase = tax_attention_query
        _UpperCAmelCase = tax_attention_value
        _UpperCAmelCase = tax_pre_attention_layer_norm
        _UpperCAmelCase = tax_enc_dec_attention_key
        _UpperCAmelCase = tax_enc_dec_attention_out
        _UpperCAmelCase = tax_enc_dec_attention_query
        _UpperCAmelCase = tax_enc_dec_attention_value
        _UpperCAmelCase = tax_cross_layer_norm
        if split_mlp_wi:
            _UpperCAmelCase = tax_mlp_wi_0
            _UpperCAmelCase = tax_mlp_wi_1
        else:
            _UpperCAmelCase = tax_mlp_wi
        _UpperCAmelCase = tax_mlp_wo
        _UpperCAmelCase = tax_mlp_layer_norm
        flax_model.params['decoder']['block'][str(layer_index)]['layer'] = flax_model_decoder_layer_block
    # Decoder Normalization
    tax_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
    _UpperCAmelCase = tax_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    _UpperCAmelCase = tax_decoder_rel_embedding
    # Token Embeddings
    tax_token_embeddings = tax_model['target']['token_embedder']['embedding']
    _UpperCAmelCase = tax_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
329
0
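# A minimal, self-contained sketch of the remapping pattern the conversion
# script above relies on: walk a nested source dict of arrays and copy each
# tensor into the matching slot of a nested target dict. The key paths below
# are illustrative toy names, not the real T5X/Flax parameter trees.
import numpy as np


def copy_params(src: dict, dst: dict, key_map: dict) -> dict:
    """Copy src[s_path] into dst[d_path] for every (s_path, d_path) pair."""
    for src_path, dst_path in key_map.items():
        node = src
        for key in src_path:
            node = node[key]          # descend to the source tensor
        target = dst
        for key in dst_path[:-1]:
            target = target[key]      # descend to the parent of the target slot
        target[dst_path[-1]] = node   # assign (transpose here if layouts differ)
    return dst


src = {"encoder": {"layers_0": {"attention": {"key": {"kernel": np.ones((4, 4))}}}}}
dst = {"encoder": {"block": {"0": {"layer": {"0": {"SelfAttention": {"k": {"kernel": None}}}}}}}}
key_map = {
    ("encoder", "layers_0", "attention", "key", "kernel"):
        ("encoder", "block", "0", "layer", "0", "SelfAttention", "k", "kernel"),
}
dst = copy_params(src, dst, key_map)
assert dst["encoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["k"]["kernel"].shape == (4, 4)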
from __future__ import annotations

from statistics import mean


def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
    __SCREAMING_SNAKE_CASE : List[str] = [0] * no_of_processes
    __SCREAMING_SNAKE_CASE : Tuple = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(lowercase__ ):
        __SCREAMING_SNAKE_CASE : Optional[int] = burst_time[i]
    __SCREAMING_SNAKE_CASE : list[int] = []
    __SCREAMING_SNAKE_CASE : List[Any] = 0
    __SCREAMING_SNAKE_CASE : Any = 0
    # While processes remain uncompleted, every process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process; the shortest process in ready_process, target_process,
    # is then executed to completion.
    while completed != no_of_processes:
        __SCREAMING_SNAKE_CASE : Optional[Any] = []
        __SCREAMING_SNAKE_CASE : Tuple = -1
        for i in range(lowercase__ ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(lowercase__ )
        if len(lowercase__ ) > 0:
            __SCREAMING_SNAKE_CASE : Optional[Any] = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    __SCREAMING_SNAKE_CASE : List[str] = i
            total_time += burst_time[target_process]
            completed += 1
            __SCREAMING_SNAKE_CASE : List[Any] = 0
            __SCREAMING_SNAKE_CASE : Optional[int] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
    __SCREAMING_SNAKE_CASE : str = [0] * no_of_processes
    for i in range(lowercase__ ):
        __SCREAMING_SNAKE_CASE : Dict = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print('[TEST CASE 01]')
    __lowerCAmelCase : Union[str, Any] = 4
    __lowerCAmelCase : str = [2, 5, 3, 7]
    __lowerCAmelCase : Optional[int] = [0, 0, 0, 0]
    __lowerCAmelCase : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    __lowerCAmelCase : List[Any] = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
9
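# A hand-checkable trace of the non-preemptive shortest-job-first logic above,
# rewritten without the anonymized names. With burst times [2, 5, 3, 7] (the
# same test case as above) and all arrivals at t=0, the run order is
# P1(2) -> P3(3) -> P2(5) -> P4(7).
def sjf_waiting_times(arrival, burst):
    n = len(burst)
    remaining = list(burst)
    waiting = [0] * n
    done, t = 0, 0
    while done != n:
        ready = [i for i in range(n) if arrival[i] <= t and remaining[i] > 0]
        if ready:
            target = min(ready, key=lambda i: remaining[i])  # shortest job wins
            t += burst[target]          # non-preemptive: run it to completion
            remaining[target] = 0
            done += 1
            waiting[target] = t - arrival[target] - burst[target]
        else:
            t += 1  # idle until the next process arrives


    return waiting


assert sjf_waiting_times([0, 0, 0, 0], [2, 5, 3, 7]) == [0, 5, 2, 10]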
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ :List[Any] = logging.get_logger(__name__) lowerCAmelCase__ :Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class __a ( UpperCAmelCase ): _a : str = 'ctrl' _a : Tuple = ['past_key_values'] _a : List[Any] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _SCREAMING_SNAKE_CASE=246534 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=8192 , _SCREAMING_SNAKE_CASE=48 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = n_positions _UpperCAmelCase = n_embd _UpperCAmelCase = n_layer _UpperCAmelCase = n_head _UpperCAmelCase = dff _UpperCAmelCase = resid_pdrop _UpperCAmelCase = embd_pdrop _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache super().__init__(**_SCREAMING_SNAKE_CASE )
329
0
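# Sketch of how the attribute_map above lets canonical names (hidden_size,
# num_hidden_layers, ...) alias CTRL's own field names. This mirrors
# PretrainedConfig behaviour on a toy class; the mechanism shown is an
# illustration, not the library's exact implementation.
class ToyConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=1280, n_layer=48):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Only reached when normal lookup fails, i.e. for the alias names.
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)


cfg = ToyConfig()
assert cfg.hidden_size == cfg.n_embd == 1280
assert cfg.num_hidden_layers == 48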
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = ProphetNetTokenizer lowercase_ = False def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[str]: '''simple docstring''' super().setUp() lowerCamelCase__: Union[str, Any] =[ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : int) ->Tuple: '''simple docstring''' lowerCamelCase__: int ="UNwant\u00E9d,running" lowerCamelCase__: Optional[Any] ="unwanted, running" return input_text, output_text def SCREAMING_SNAKE_CASE_ (self : str) ->Tuple: '''simple docstring''' lowerCamelCase__: Dict =self.tokenizer_class(self.vocab_file) lowerCamelCase__: Tuple =tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(UpperCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [9, 6, 7, 12, 10, 11]) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] =BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"]) def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =BasicTokenizer(do_lower_case=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple: '''simple docstring''' lowerCamelCase__: Dict =BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"]) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: List[Any] =BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =BasicTokenizer(do_lower_case=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def SCREAMING_SNAKE_CASE_ (self : Any) ->Any: '''simple docstring''' lowerCamelCase__: Any =BasicTokenizer(do_lower_case=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
") , ["HeLLo", "!", "how", "Are", "yoU", "?"]) def SCREAMING_SNAKE_CASE_ (self : str) ->Any: '''simple docstring''' lowerCamelCase__: Union[str, Any] =BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"]) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"]) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any: '''simple docstring''' lowerCamelCase__: int =BasicTokenizer(do_lower_case=UpperCAmelCase_ , never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: int =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCamelCase__: Any ={} for i, token in enumerate(UpperCAmelCase_): lowerCamelCase__: str =i lowerCamelCase__: Union[str, Any] =WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize("") , []) self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"]) @require_torch def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: int =self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") lowerCamelCase__: int =["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__: Any =[1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102] lowerCamelCase__: Optional[Any] =tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="pt") self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =list(batch.input_ids.numpy()[0]) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict: '''simple docstring''' self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int: '''simple docstring''' self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) @slow def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") lowerCamelCase__: str =tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: 
Optional[Any] =tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: int =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_) lowerCamelCase__: int =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
10
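# A minimal greedy longest-match-first WordPiece sketch, assuming the same
# vocabulary conventions as the tokenizer tests above ("##" marks
# word-internal pieces, any unmatchable span makes the whole word unknown).
# This is a standalone illustration, not the library's WordpieceTokenizer.
def wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece      # continuation pieces get the prefix
            if piece in vocab:
                cur = piece               # longest match found
                break
            end -= 1
        if cur is None:
            return [unk]
        pieces.append(cur)
        start = end
    return pieces


vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", vocab) == ["[UNK]"]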
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
329
0
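# Sketch of the aspect-ratio-preserving resize that get_expected_values above
# recomputes: scale so the short side reaches `shortest_edge`, then shrink if
# the long side would exceed `longest_edge`. A simplified stand-in for
# illustration, not the exact library routine.
def target_size(h: int, w: int, shortest_edge: int = 800, longest_edge: int = 1333):
    short_side, long_side = min(h, w), max(h, w)
    if short_side * longest_edge < long_side * shortest_edge:
        # The long side would overflow; rescale against it instead.
        shortest_edge = int(round(longest_edge * short_side / long_side))
    if h < w:
        return shortest_edge, int(shortest_edge * w / h)
    return int(shortest_edge * h / w), shortest_edge


# A 480x640 COCO image lands on 800x1066, matching the slow tests above.
assert target_size(480, 640) == (800, 1066)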
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json', 'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json', 'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json', 'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json', 'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json', 'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json', 'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json', 'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json', 'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json', 'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json', } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "xlm" __SCREAMING_SNAKE_CASE = { "hidden_size": "emb_dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", "n_words": "vocab_size", # For backward compatibility } def __init__( self , __lowerCamelCase=3_0_1_4_5 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=1 , __lowerCamelCase=True , __lowerCamelCase=5_1_2 , __lowerCamelCase=2_0_4_8**-0.5 , __lowerCamelCase=1e-12 , __lowerCamelCase=0.0_2 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=5 , __lowerCamelCase=True , __lowerCamelCase="first" , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=0.1 , __lowerCamelCase=5 , __lowerCamelCase=5 , __lowerCamelCase=0 , __lowerCamelCase=0 , __lowerCamelCase=2 , __lowerCamelCase=0 , **__lowerCamelCase , ) -> Optional[int]: _A : Optional[Any] = vocab_size _A : Optional[Any] = emb_dim _A : Union[str, Any] = n_layers _A : int = n_heads _A : Union[str, Any] = dropout _A : List[str] = attention_dropout _A : Tuple = gelu_activation _A : Dict = sinusoidal_embeddings _A : int = causal _A : int = asm _A : int = n_langs _A : int = use_lang_emb _A : Union[str, Any] = layer_norm_eps _A : Union[str, Any] = bos_index _A : Union[str, Any] = eos_index _A : Tuple = pad_index _A : Any = unk_index _A : Dict = mask_index _A : str = is_encoder _A : Union[str, Any] = max_position_embeddings _A : Optional[int] = embed_init_std _A : List[str] = init_std _A : Optional[int] = summary_type _A : Optional[Any] = summary_use_proj _A : Dict = summary_activation _A : Optional[Any] = summary_proj_to_labels _A : Any = summary_first_dropout _A : str = start_n_top _A : Any = end_n_top _A : int = mask_token_id _A : List[Any] = lang_id if "n_words" in kwargs: _A : Any = kwargs["n_words"] super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , **__lowerCamelCase) class lowerCAmelCase__ ( a): '''simple docstring''' @property def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : List[str] = {0: "batch", 1: "choice", 2: "sequence"} else: _A : List[Any] = {0: "batch", 1: 
"sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ])
11
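# Sketch of how the `inputs` property above is consumed: each {position: name}
# mapping becomes a dynamic-axes spec for the ONNX exporter, so batch and
# sequence dimensions stay symbolic in the exported graph. Toy values only.
from collections import OrderedDict

task = "default"
if task == "multiple-choice":
    dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
    dynamic_axis = {0: "batch", 1: "sequence"}
inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
# Roughly how an exporter would use it:
# torch.onnx.export(model, args, f, input_names=list(inputs), dynamic_axes=dict(inputs), ...)
for name, axes in inputs.items():
    print(name, "->", axes)  # e.g. input_ids -> {0: 'batch', 1: 'sequence'}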
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
329
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'caidas/swin2sr-classicalsr-x2-64': ( 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json' ), } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Union[str, Any] = 'swin2sr' UpperCAmelCase__ : List[Any] = { 'hidden_size': 'embed_dim', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self: List[str] , UpperCamelCase_: List[str]=64 , UpperCamelCase_: str=1 , UpperCamelCase_: int=3 , UpperCamelCase_: Any=1_80 , UpperCamelCase_: Optional[Any]=[6, 6, 6, 6, 6, 6] , UpperCamelCase_: Optional[int]=[6, 6, 6, 6, 6, 6] , UpperCamelCase_: Union[str, Any]=8 , UpperCamelCase_: Union[str, Any]=2.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: Optional[Any]=0.0 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: Any=0.02 , UpperCamelCase_: Any=1E-5 , UpperCamelCase_: str=2 , UpperCamelCase_: int=1.0 , UpperCamelCase_: str="1conv" , UpperCamelCase_: Any="pixelshuffle" , **UpperCamelCase_: Optional[Any] , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = embed_dim __lowerCamelCase = depths __lowerCamelCase = len(UpperCamelCase_ ) __lowerCamelCase = num_heads __lowerCamelCase = window_size __lowerCamelCase = mlp_ratio __lowerCamelCase = qkv_bias __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = drop_path_rate __lowerCamelCase = hidden_act __lowerCamelCase = use_absolute_embeddings __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = upscale __lowerCamelCase = img_range __lowerCamelCase = resi_connection __lowerCamelCase = upsampler
12
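# Hedged usage sketch for the config above: the defaults reproduce the
# classical x2 super-resolution layout (embed_dim=180, six stages of depth 6),
# and overriding `upscale` is how x4 variants are configured. Assumes a
# transformers version that ships Swin2SR; values are illustrative.
from transformers import Swin2SRConfig

config = Swin2SRConfig()                # classical SR x2 defaults
config_x4 = Swin2SRConfig(upscale=4)    # same backbone, 4x upsampler
print(config.embed_dim, config.upscale, config_x4.upscale)  # 180 2 4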
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__) def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' , ) _UpperCAmelCase = parser.parse_args() return args def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' def fn(a__: str ): return tokenizer(examples['text'] ) return fn def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for i in range(len(tokenized_data['input_ids'] ) ): _UpperCAmelCase = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _UpperCAmelCase = tf.train.Features(feature=a__ ) _UpperCAmelCase = tf.train.Example(features=a__ ) _UpperCAmelCase = example.SerializeToString() records.append(a__ ) return records def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int: '''simple docstring''' _UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _UpperCAmelCase = min(len(a__ ) , args.limit ) _UpperCAmelCase = dataset.select(range(a__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(a__ ): os.makedirs(a__ ) else: _UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_UpperCAmelCase = tokenize_function(a__ ) _UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(a__: Optional[int] ): # Concatenate all texts. _UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )] for k, t in concatenated_examples.items() } return result _UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 for shard in range(0 , len(a__ ) , args.shard_size ): _UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] _UpperCAmelCase = len(dataset_snapshot['input_ids'] ) _UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _UpperCAmelCase = get_serialized_examples(a__ ) with tf.io.TFRecordWriter(a__ ) as out_file: for i in range(len(a__ ) ): _UpperCAmelCase = serialized_examples[i] out_file.write(a__ ) print('Wrote file {} containing {} records'.format(a__ , a__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f: print(F'''Total {args.split} records: {total_records}''' , file=a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = parse_args() main(args)
329
0
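# Toy trace of the group_texts chunking used above: concatenate every
# tokenized sample, drop the tail that does not fill a block, and slice
# fixed-length windows. Standalone re-implementation for illustration.
def group_texts(examples: dict, max_length: int = 4) -> dict:
    concatenated = {k: sum(examples[k], []) for k in examples}
    total = (len(concatenated["input_ids"]) // max_length) * max_length
    return {
        k: [t[i : i + max_length] for i in range(0, total, max_length)]
        for k, t in concatenated.items()
    }


batch = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}
# 10 tokens -> two full blocks of 4; tokens 9 and 10 are dropped.
assert group_texts(batch)["input_ids"] == [[1, 2, 3, 4], [5, 6, 7, 8]]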
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class __lowercase : """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False): SCREAMING_SNAKE_CASE_: Any = scheduler SCREAMING_SNAKE_CASE_: List[str] = optimizers if isinstance(lowerCAmelCase__ , (list, tuple)) else [optimizers] SCREAMING_SNAKE_CASE_: str = split_batches SCREAMING_SNAKE_CASE_: Optional[Any] = step_with_optimizer SCREAMING_SNAKE_CASE_: Dict = GradientState() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : List[Any]): if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step SCREAMING_SNAKE_CASE_: Union[str, Any] = AcceleratorState().num_processes for _ in range(lowerCAmelCase__): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , "total_steps"): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__) else: self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): return self.scheduler.get_last_lr() def _SCREAMING_SNAKE_CASE ( self : Dict): return self.scheduler.state_dict() def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any]): self.scheduler.load_state_dict(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): return self.scheduler.get_lr() def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[int]): return self.scheduler.print_lr(*lowerCAmelCase__ , **lowerCAmelCase__)
13
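# Hedged usage sketch: in practice this wrapper is what `accelerator.prepare`
# returns for a scheduler, so it steps in lockstep with the prepared optimizer
# and skips steps that gradient accumulation or overflow skipped. Assumes a
# single process; the printed value is illustrative.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

x = torch.randn(4, 2)
loss = model(x).sum()
accelerator.backward(loss)
optimizer.step()
scheduler.step()                 # forwarded through the wrapper above
optimizer.zero_grad()
print(scheduler.get_last_lr())   # [0.05] after one real optimizer step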
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
329
0
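# Small sketch reproducing the linear-warmup-then-linear-decay numbers the
# schedule test above expects (peak lr 10.0, 2 warmup steps, 10 training
# steps). A standalone re-derivation, not the library function itself.
def linear_warmup_decay(step, num_warmup=2, num_training=10, peak=10.0):
    if step < num_warmup:
        return peak * step / num_warmup                     # ramp up
    return peak * max(0.0, (num_training - step) / (num_training - num_warmup))  # decay


lrs = [round(linear_warmup_decay(s), 2) for s in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]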
def join(separator: str, separated: list) -> str:
    """Join a list of strings with `separator`, validating each element."""
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('join() accepts only strings to be joined' )
        joined += word_or_phrase + separator
    return joined.strip(separator )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
14
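# Usage sketch for the join above: behaves like str.join but validates element
# types and strips the trailing separator it appends.
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"
assert join(",", ["a", "b", "c"]) == "a,b,c"
# Non-string members raise:
# join("#", ["a", 1]) -> Exception("join() accepts only strings to be joined")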
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original block number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # the block index sits two dot-separated segments before the renamed module
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
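A possible follow-up once the script has run, reusing its prepare_img helper; the folder name "converted_poolformer" is hypothetical and stands for whatever --pytorch_dump_folder_path was used:

from transformers import PoolFormerForImageClassification, PoolFormerImageProcessor

model = PoolFormerForImageClassification.from_pretrained("converted_poolformer")
processor = PoolFormerImageProcessor.from_pretrained("converted_poolformer")
inputs = processor(images=prepare_img(), return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])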
329
0
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            # stack only when every element is a jax array of identical shape and dtype
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
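This formatter is what the public datasets API dispatches to for JAX output; a minimal sketch, assuming jax and datasets are installed:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("jax")
row = ds[0]      # values come back as jax arrays
col = ds["x"]    # equal-shaped rows get stacked into one jax.Array of shape (2, 2)
print(type(row["x"]), col.shape)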
15
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
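The last two wrapper classes above correspond to transformers' KeyDataset and KeyPairDataset, which expose a single column (or column pair) of a datasets.Dataset to a pipeline. A small sketch of the typical KeyDataset use, assuming network access to download the sst-2 checkpoint:

from datasets import Dataset
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

data = Dataset.from_dict({"text": ["I love this.", "I hate this."]})
clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
for output in clf(KeyDataset(data, "text"), batch_size=2):
    print(output)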
329
0
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase_ = None lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase_ = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase_ = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : Tuple = VOCAB_FILES_NAMES lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : Optional[int] = ["input_ids", "attention_mask"] lowerCAmelCase : int = TaTokenizer lowerCAmelCase : List[int] = [] def __init__( self : Any ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : Optional[Any]="</s>" ,_snake_case : Tuple="<unk>" ,_snake_case : str="<pad>" ,_snake_case : Optional[Any]=100 ,_snake_case : str=None ,**_snake_case : str ,) -> int: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: lowercase__ : Tuple = [f"""<extra_id_{i}>""" for i in range(_snake_case )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__ : List[Any] = len(set(filter(lambda _snake_case : bool('''extra_id_''' in str(_snake_case ) ) ,_snake_case ) ) ) if extra_tokens != extra_ids: raise ValueError( f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ''' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( _snake_case ,tokenizer_file=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,pad_token=_snake_case ,extra_ids=_snake_case ,additional_special_tokens=_snake_case ,**_snake_case ,) lowercase__ : List[Any] = vocab_file lowercase__ : int = False if not self.vocab_file else True lowercase__ : List[str] = extra_ids @staticmethod def UpperCAmelCase ( _snake_case : Union[str, Any] ,_snake_case : Optional[int] ,_snake_case : str ) -> List[str]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__ : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f""" {pretrained_model_name_or_path} automatically truncating your input to""" f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' ,_snake_case ,) return max_model_length def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Union[str, Any] = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file ,_snake_case ) logger.info(f"""Copy vocab file to {out_vocab_file}""" ) return (out_vocab_file,) def UpperCAmelCase ( self : Dict ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : Any = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__ : Dict = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCAmelCase ( self : int ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ : Tuple = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" return list( set(filter(lambda _snake_case : bool(re.search(r'''<extra_id_\d+>''' ,_snake_case ) ) is not None ,self.additional_special_tokens ) ) ) def UpperCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" return 
[self.convert_tokens_to_ids(_snake_case ) for token in self.get_sentinel_tokens()]
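A short sketch of the sentinel-token helpers defined above, assuming network access to fetch the t5-small tokenizer files:

from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
print(sorted(tok.get_sentinel_tokens())[:3])  # e.g. ['<extra_id_0>', '<extra_id_1>', '<extra_id_10>']
print(tok.get_sentinel_token_ids()[:3])
ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids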
16
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
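A minimal sketch instantiating the config above with a small, illustrative geometry; Data2VecTextModel is the matching transformers model class and the weights here are randomly initialized:

from transformers import Data2VecTextConfig, Data2VecTextModel

config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=512)
model = Data2VecTextModel(config)
print(config.model_type)  # "data2vec-text"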
329
0
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _a = logging.get_logger(__name__) _a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED _a = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } _a = { 'allenai/led-base-16384': 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _A ( ) -> Union[str, Any]: '''simple docstring''' __lowercase = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) __lowercase = bs[:] __lowercase = 0 for b in range(2**8): if b not in bs: bs.append(UpperCamelCase_) cs.append(2**8 + n) n += 1 __lowercase = [chr(UpperCamelCase_) for n in cs] return dict(zip(UpperCamelCase_, UpperCamelCase_)) def _A ( UpperCamelCase_ : List[Any]) -> Any: '''simple docstring''' __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char)) __lowercase = char return pairs class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Any = VOCAB_FILES_NAMES __UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any]="replace", UpperCAmelCase__ : Tuple="<s>", UpperCAmelCase__ : Optional[Any]="</s>", UpperCAmelCase__ : Any="</s>", UpperCAmelCase__ : Optional[int]="<s>", UpperCAmelCase__ : Tuple="<unk>", UpperCAmelCase__ : Dict="<pad>", UpperCAmelCase__ : int="<mask>", UpperCAmelCase__ : Any=False, **UpperCAmelCase__ : List[Any], ): __lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else bos_token __lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else eos_token __lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else sep_token __lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else cls_token __lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else unk_token __lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else mask_token super().__init__( errors=UpperCAmelCase__, bos_token=UpperCAmelCase__, eos_token=UpperCAmelCase__, unk_token=UpperCAmelCase__, sep_token=UpperCAmelCase__, cls_token=UpperCAmelCase__, pad_token=UpperCAmelCase__, mask_token=UpperCAmelCase__, add_prefix_space=UpperCAmelCase__, **UpperCAmelCase__, ) with open(UpperCAmelCase__, encoding="utf-8" ) as vocab_handle: __lowercase = json.load(UpperCAmelCase__ ) __lowercase = {v: k for k, v in self.encoder.items()} __lowercase = errors # how to handle errors in decoding __lowercase = bytes_to_unicode() __lowercase = {v: k for k, v in self.byte_encoder.items()} with open(UpperCAmelCase__, encoding="utf-8" ) as merges_handle: __lowercase = merges_handle.read().split("\n" )[1:-1] __lowercase = [tuple(merge.split() ) for merge in bpe_merges] __lowercase = dict(zip(UpperCAmelCase__, range(len(UpperCAmelCase__ ) ) ) ) __lowercase = {} __lowercase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowercase = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _lowercase ( self : List[str] ): return len(self.encoder ) def _lowercase ( self : int ): return dict(self.encoder, **self.added_tokens_encoder ) def _lowercase ( self : str, UpperCAmelCase__ : str ): if token in self.cache: return self.cache[token] __lowercase = tuple(UpperCAmelCase__ ) __lowercase = get_pairs(UpperCAmelCase__ ) if not pairs: return token while True: __lowercase = min(UpperCAmelCase__, key=lambda UpperCAmelCase__ : self.bpe_ranks.get(UpperCAmelCase__, float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowercase ,__lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(UpperCAmelCase__ ): try: __lowercase = word.index(UpperCAmelCase__, UpperCAmelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase = j if word[i] == first and i < len(UpperCAmelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(UpperCAmelCase__ ) __lowercase = new_word if len(UpperCAmelCase__ ) == 1: break else: __lowercase = get_pairs(UpperCAmelCase__ ) __lowercase = " ".join(UpperCAmelCase__ ) __lowercase = word return word def _lowercase ( self : Tuple, UpperCAmelCase__ : Tuple ): __lowercase = [] for token in re.findall(self.pat, UpperCAmelCase__ ): __lowercase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase__ ).split(" " ) ) return bpe_tokens def _lowercase ( self : Optional[int], UpperCAmelCase__ : int ): return self.encoder.get(UpperCAmelCase__, self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : str ): return self.decoder.get(UpperCAmelCase__ ) def _lowercase ( self : List[Any], UpperCAmelCase__ : Any ): __lowercase = "".join(UpperCAmelCase__ ) __lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors ) return text def _lowercase ( self : str, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[str] = None 
): if not os.path.isdir(UpperCAmelCase__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( UpperCAmelCase__, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join( UpperCAmelCase__, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCAmelCase__, "w", encoding="utf-8" ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=UpperCAmelCase__, ensure_ascii=UpperCAmelCase__ ) + "\n" ) __lowercase = 0 with open(UpperCAmelCase__, "w", encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda UpperCAmelCase__ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowercase = token_index writer.write(" ".join(UpperCAmelCase__ ) + "\n" ) index += 1 return vocab_file, merge_file def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Any, UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__, token_ids_a=UpperCAmelCase__, already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase__ )) + [1] return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ )) + [1] def _lowercase ( self : List[Any], UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None ): __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : str=False, **UpperCAmelCase__ : List[Any] ): __lowercase = kwargs.pop("add_prefix_space", self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase__ ) > 0 and not text[0].isspace()): __lowercase = " " + text return (text, kwargs) def _lowercase ( self : Dict, UpperCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding], UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[bool] = None, ): __lowercase = super()._pad( encoded_inputs=UpperCAmelCase__, max_length=UpperCAmelCase__, padding_strategy=UpperCAmelCase__, pad_to_multiple_of=UpperCAmelCase__, return_attention_mask=UpperCAmelCase__, ) # Load from model defaults if return_attention_mask is None: __lowercase = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __lowercase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(UpperCAmelCase__ ) if needs_to_be_padded: __lowercase = len(UpperCAmelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __lowercase = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __lowercase = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
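A sketch of the `global_attention_mask` padding behavior implemented in `_pad` above: 1 marks global attention, 0 local attention, and positions added by padding are filled with -1. The input texts are illustrative and the checkpoint download requires network access:

from transformers import LEDTokenizer

tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tok(["short text", "a somewhat longer piece of text"])
# give the first token of each sequence global attention
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
padded = tok.pad(enc, padding="longest")
print(padded["global_attention_mask"])  # the shorter row ends in -1 entries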
17
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
329
0
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return a writer batch size capped for image, audio and binary features."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
18
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
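A quick check of the solver above: the first Fibonacci number with 3 digits is F(12) = 144, and the first with 1000 digits is F(4782) (Project Euler problem 25):

assert solution(3) == 12
assert solution() == 4782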
import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
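
# A hedged sketch of exercising the slow path above by hand; it assumes
# network access to the Hub (the model id and expected ids are taken
# directly from the test itself).
#
# from transformers import XLMProphetNetTokenizer
#
# tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
# assert tok.encode("Hello World!") == [35389, 6672, 49, 2]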
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
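
# For context, a minimal hedged sketch of what these shape assertions exercise:
# constructing the processor with its defaults (no Hub checkpoint needed) and
# preprocessing a single PIL image. The 64x64 input size is illustrative.
#
# import numpy as np
# from PIL import Image
# from transformers import PoolFormerImageProcessor
#
# image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
# processor = PoolFormerImageProcessor()  # default resize/crop settings
# pixel_values = processor(image, return_tensors="pt").pixel_values
# print(pixel_values.shape)  # (1, 3, crop_height, crop_width)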
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )