Dataset schema:

- code: string, length 82 to 54.1k characters
- code_codestyle: int64, 0 to 699
- style_context: string, length 111 to 35.6k characters
- style_context_codestyle: int64, 0 to 699
- label: int64, 0 to 1
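Each row pairs a code sample with a style context, each tagged with an integer style id, plus a binary label. A minimal sketch of reading one row with the Hugging Face `datasets` library follows; the dataset id `org/code-style-pairs` is a placeholder, since the preview does not name the repository.

```python
# Minimal sketch, assuming a placeholder dataset id (the real repo path is not
# given in the preview).
from datasets import load_dataset

ds = load_dataset("org/code-style-pairs", split="train")  # hypothetical id
row = ds[0]

print(len(row["code"]))                # string, 82 to 54.1k characters
print(row["code_codestyle"])           # int64 style id in [0, 699]
print(len(row["style_context"]))       # string, 111 to 35.6k characters
print(row["style_context_codestyle"])  # int64 style id in [0, 699]
print(row["label"])                    # int64 in {0, 1}
```

Row 1 · code: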
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase_ ( a__ ): def snake_case_ ( self ) -> Tuple: UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) ) class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any: UpperCamelCase : int = parent UpperCamelCase : int = batch_size UpperCamelCase : List[Any] = image_size UpperCamelCase : List[str] = patch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 ) UpperCamelCase : List[str] = hidden_act UpperCamelCase : Optional[int] = conv_kernel_size UpperCamelCase : List[str] = output_stride UpperCamelCase : Union[str, Any] = classifier_dropout_prob UpperCamelCase : List[Any] = use_labels UpperCamelCase : Any = is_training UpperCamelCase : int = num_labels UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Tuple = scope UpperCamelCase : List[str] = width_multiplier UpperCamelCase : Any = ffn_dropout UpperCamelCase : List[Any] = attn_dropout def snake_case_ ( self ) -> int: UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[str] = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels ) UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCamelCase : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def snake_case_ ( self ) -> int: return MobileViTVaConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Optional[int] = self.num_labels UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Any = self.num_labels UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : Tuple = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase__ : Any = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Dict = MobileViTVaModelTester(self ) UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def snake_case_ ( self ) -> int: pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def snake_case_ ( self ) -> str: pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> List[str]: UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : str = [*signature.parameters.keys()] UpperCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Tuple: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Tuple = outputs.hidden_states UpperCamelCase : Dict = 5 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCamelCase : Any = 2 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : Optional[int] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> str: UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) @slow def snake_case_ ( self ) -> Optional[Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Tuple: UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def snake_case_ ( self ) -> str: return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : 
Any = prepare_img() UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Union[str, Any] = prepare_img() UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ], device=SCREAMING_SNAKE_CASE_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Tuple = prepare_img() UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = outputs.logits.detach().cpu() UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] ) UpperCamelCase : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
code_codestyle: 40

style_context:
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __UpperCAmelCase = random.Random() def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any: if rng is None: UpperCamelCase : int = global_rng UpperCamelCase : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]: UpperCamelCase : Dict = parent UpperCamelCase : Dict = batch_size UpperCamelCase : Any = min_seq_length UpperCamelCase : Optional[int] = max_seq_length UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Tuple = feature_size UpperCamelCase : Any = padding_value UpperCamelCase : Tuple = sampling_rate UpperCamelCase : Optional[Any] = return_attention_mask UpperCamelCase : Optional[Any] = do_normalize def snake_case_ ( self ) -> Union[str, Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]: def _flatten(SCREAMING_SNAKE_CASE_ ): return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Union[str, Any] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] return speech_inputs class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Any = WavaVecaFeatureExtractor def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) ) def snake_case_ ( self ) -> Optional[int]: # Tests that all call wrap to encode_plus and batch_encode_plus UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, 
SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test batched UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : Any = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Tuple: UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Tuple = range(800, 1400, 200 ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths] UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : List[str] = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : int = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 
200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' ) UpperCamelCase : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' ) UpperCamelCase : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def snake_case_ ( self ) -> str: import torch UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : Dict = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def snake_case_ ( self ) -> Tuple: # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
style_context_codestyle: 40
label: 1

Row 2 · code:
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : torch.FloatTensor class lowerCAmelCase_ ( a__ , a__ ): @register_to_config def __init__( self, SCREAMING_SNAKE_CASE_ = 6_5536, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = 0, SCREAMING_SNAKE_CASE_ = "fourier", SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), SCREAMING_SNAKE_CASE_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), SCREAMING_SNAKE_CASE_ = "UNetMidBlock1D", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = (32, 32, 64), SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 8, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = False, ) -> Any: super().__init__() UpperCamelCase : Optional[Any] = sample_size # time if time_embedding_type == "fourier": UpperCamelCase : Any = GaussianFourierProjection( embedding_size=8, set_W_to_weight=SCREAMING_SNAKE_CASE_, log=SCREAMING_SNAKE_CASE_, flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = 2 * block_out_channels[0] elif time_embedding_type == "positional": UpperCamelCase : Optional[Any] = Timesteps( block_out_channels[0], flip_sin_to_cos=SCREAMING_SNAKE_CASE_, downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = block_out_channels[0] if use_timestep_embedding: UpperCamelCase : List[Any] = block_out_channels[0] * 4 UpperCamelCase : Any = TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_, time_embed_dim=SCREAMING_SNAKE_CASE_, act_fn=SCREAMING_SNAKE_CASE_, out_dim=block_out_channels[0], ) UpperCamelCase : Tuple = nn.ModuleList([] ) UpperCamelCase : Dict = None UpperCamelCase : Tuple = nn.ModuleList([] ) UpperCamelCase : Optional[Any] = None # down UpperCamelCase : Any = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : int = output_channel UpperCamelCase : Union[str, Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels UpperCamelCase : str = i == len(SCREAMING_SNAKE_CASE_ ) - 1 UpperCamelCase : Tuple = get_down_block( SCREAMING_SNAKE_CASE_, num_layers=SCREAMING_SNAKE_CASE_, in_channels=SCREAMING_SNAKE_CASE_, out_channels=SCREAMING_SNAKE_CASE_, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid UpperCamelCase : Dict = get_mid_block( SCREAMING_SNAKE_CASE_, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=SCREAMING_SNAKE_CASE_, add_downsample=SCREAMING_SNAKE_CASE_, ) # up UpperCamelCase : Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Tuple = reversed_block_out_channels[0] if out_block_type is None: UpperCamelCase : List[Any] = out_channels else: UpperCamelCase : Tuple = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = output_channel UpperCamelCase : Union[str, Any] = ( reversed_block_out_channels[i + 1] if i < 
len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) UpperCamelCase : Dict = i == len(SCREAMING_SNAKE_CASE_ ) - 1 UpperCamelCase : List[str] = get_up_block( SCREAMING_SNAKE_CASE_, num_layers=SCREAMING_SNAKE_CASE_, in_channels=SCREAMING_SNAKE_CASE_, out_channels=SCREAMING_SNAKE_CASE_, temb_channels=block_out_channels[0], add_upsample=not is_final_block, ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = output_channel # out UpperCamelCase : List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32 ) UpperCamelCase : Tuple = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_, num_groups_out=SCREAMING_SNAKE_CASE_, embed_dim=block_out_channels[0], out_channels=SCREAMING_SNAKE_CASE_, act_fn=SCREAMING_SNAKE_CASE_, fc_dim=block_out_channels[-1] // 4, ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True, ) -> Union[UNetaDOutput, Tuple]: UpperCamelCase : Optional[Any] = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = torch.tensor([timesteps], dtype=torch.long, device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: UpperCamelCase : List[str] = timesteps[None].to(sample.device ) UpperCamelCase : Tuple = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: UpperCamelCase : str = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : Dict = timestep_embed[..., None] UpperCamelCase : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) UpperCamelCase : Optional[int] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down UpperCamelCase : str = () for downsample_block in self.down_blocks: UpperCamelCase , UpperCamelCase : Tuple = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_, temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: UpperCamelCase : Tuple = self.mid_block(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): UpperCamelCase : Optional[Any] = down_block_res_samples[-1:] UpperCamelCase : Optional[int] = down_block_res_samples[:-1] UpperCamelCase : Any = upsample_block(SCREAMING_SNAKE_CASE_, res_hidden_states_tuple=SCREAMING_SNAKE_CASE_, temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: UpperCamelCase : Any = self.out_block(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
code_codestyle: 40

style_context:
def UpperCamelCase ( snake_case__ : int ) -> str: if isinstance(snake_case__ , snake_case__ ): raise TypeError('\'float\' object cannot be interpreted as an integer' ) if isinstance(snake_case__ , snake_case__ ): raise TypeError('\'str\' object cannot be interpreted as an integer' ) if num == 0: return "0b0" UpperCamelCase : int = False if num < 0: UpperCamelCase : Optional[Any] = True UpperCamelCase : Tuple = -num UpperCamelCase : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(snake_case__ ) for e in binary ) return "0b" + "".join(str(snake_case__ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 40
label: 1

Row 3 · code:
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCamelCase ( snake_case__ : str ) -> None: UpperCamelCase , UpperCamelCase : Tuple = analyze_text(snake_case__ ) UpperCamelCase : Optional[Any] = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. UpperCamelCase : List[str] = sum(single_char_strings.values() ) # one length string UpperCamelCase : Optional[Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: UpperCamelCase : Tuple = single_char_strings[ch] UpperCamelCase : Any = my_str / all_sum my_fir_sum += prob * math.loga(snake_case__ ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string UpperCamelCase : List[str] = sum(two_char_strings.values() ) UpperCamelCase : Union[str, Any] = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: UpperCamelCase : List[Any] = cha + cha if sequence in two_char_strings: UpperCamelCase : List[str] = two_char_strings[sequence] UpperCamelCase : Dict = int(snake_case__ ) / all_sum my_sec_sum += prob * math.loga(snake_case__ ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCamelCase ( snake_case__ : str ) -> tuple[dict, dict]: UpperCamelCase : List[str] = Counter() # type: ignore UpperCamelCase : Any = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(snake_case__ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCamelCase ( ) -> Optional[int]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
code_codestyle: 40

style_context:
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __UpperCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: UpperCamelCase : List[Any] = tensor_name.split('.' ) for split in splits[:-1]: UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) UpperCamelCase : Dict = new_module UpperCamelCase : int = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) UpperCamelCase : Union[str, Any] = tensor_name in module._buffers UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) UpperCamelCase : Optional[Any] = False UpperCamelCase : str = False if is_buffer or not is_bitsandbytes_available(): UpperCamelCase : List[str] = False UpperCamelCase : Tuple = False else: UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCamelCase : List[Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCamelCase : Dict = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[Any] = value.to('cpu' ) if value.dtype == torch.inta: UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: UpperCamelCase : Union[str, Any] = new_value.T UpperCamelCase : Union[str, Any] = old_value.__dict__ if is_abit: UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) UpperCamelCase : Dict = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[str] = value.to(snake_case__ ) else: UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: UpperCamelCase : Optional[int] = new_value else: UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) UpperCamelCase : List[str] = new_value def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int: for name, module in model.named_children(): if current_key_name is None: UpperCamelCase : str = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape else: UpperCamelCase : Any = module.in_features UpperCamelCase : List[str] = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCamelCase : Any = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCamelCase : Optional[int] = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCamelCase : str = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCamelCase : int = True # Store the module class in case we need to transpose the weight later UpperCamelCase : Any = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]: UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , 
snake_case__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]: UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCamelCase : List[str] = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] ) UpperCamelCase : Optional[int] = len(snake_case__ ) > 0 # Check if it is a base model UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCamelCase : List[Any] = list(model.named_children() ) UpperCamelCase : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ ) UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys UpperCamelCase : Tuple = ['.weight', '.bias'] UpperCamelCase : Tuple = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
style_context_codestyle: 40
label: 1

Row 4 · code:
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCAmelCase = 0 __UpperCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCAmelCase = tuple[int, int] class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> None: UpperCamelCase : int = pos_x UpperCamelCase : Optional[Any] = pos_y UpperCamelCase : Tuple = (pos_y, pos_x) UpperCamelCase : Any = goal_x UpperCamelCase : Union[str, Any] = goal_y UpperCamelCase : Optional[int] = g_cost UpperCamelCase : str = parent UpperCamelCase : Optional[Any] = self.calculate_heuristic() UpperCamelCase : str = self.g_cost + self.h_cost def snake_case_ ( self ) -> float: UpperCamelCase : str = self.pos_x - self.goal_x UpperCamelCase : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(SCREAMING_SNAKE_CASE_ ) + abs(SCREAMING_SNAKE_CASE_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self, SCREAMING_SNAKE_CASE_ ) -> bool: return self.f_cost < other.f_cost class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int: UpperCamelCase : List[Any] = Node(start[1], start[0], goal[1], goal[0], 0, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = [self.start] UpperCamelCase : list[Node] = [] UpperCamelCase : Optional[int] = False def snake_case_ ( self ) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCamelCase : Union[str, Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(SCREAMING_SNAKE_CASE_ ) self.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = self.get_successors(SCREAMING_SNAKE_CASE_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: # retrieve the best current path UpperCamelCase : List[str] = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) return [self.start.pos] def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> list[Node]: UpperCamelCase : Any = [] for action in delta: UpperCamelCase : str = parent.pos_x + action[1] UpperCamelCase : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, SCREAMING_SNAKE_CASE_, ) ) return successors def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> list[TPosition]: UpperCamelCase : Union[str, Any] = node UpperCamelCase : str = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCamelCase : Union[str, Any] = current_node.parent path.reverse() return path class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> 
None: UpperCamelCase : Union[str, Any] = AStar(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = AStar(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = False def snake_case_ ( self ) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCamelCase : Union[str, Any] = self.fwd_astar.open_nodes.pop(0 ) UpperCamelCase : str = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = current_bwd_node UpperCamelCase : Tuple = current_fwd_node UpperCamelCase : Tuple = { self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ), self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: # retrieve the best current path UpperCamelCase : Optional[Any] = astar.open_nodes.pop( astar.open_nodes.index(SCREAMING_SNAKE_CASE_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) return [self.fwd_astar.start.pos] def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> list[TPosition]: UpperCamelCase : Optional[Any] = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ ) bwd_path.pop() bwd_path.reverse() UpperCamelCase : Tuple = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCAmelCase = (0, 0) __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCAmelCase = time.time() __UpperCAmelCase = AStar(init, goal) __UpperCAmelCase = a_star.search() __UpperCAmelCase = time.time() - start_time print(F"""AStar execution time = {end_time:f} seconds""") __UpperCAmelCase = time.time() __UpperCAmelCase = BidirectionalAStar(init, goal) __UpperCAmelCase = time.time() - bd_start_time print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
code_codestyle: 40

style_context:
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCamelCase ( snake_case__ : int ) -> Dict: UpperCamelCase : Optional[Any] = tmp_path / 'file.csv' UpperCamelCase : Optional[Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]: UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv' UpperCamelCase : Any = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str: UpperCamelCase : Any = tmp_path / 'csv_with_image.csv' UpperCamelCase : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple: UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv' UpperCamelCase : Dict = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Dict ) -> List[str]: UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv' UpperCamelCase : Union[str, Any] = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]: UpperCamelCase : str = Csv() UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(snake_case__ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(snake_case__ ) in record.message for record in caplog.records ) @require_pil def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : List[str] = f.read().splitlines()[1] UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] ) UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() UpperCamelCase : str = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCamelCase ( snake_case__ : Any ) -> str: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : Any = f.read().splitlines()[1:] UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] ) UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() UpperCamelCase : List[str] = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels] def UpperCamelCase ( snake_case__ : str ) -> 
List[Any]: UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} ) UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] ) UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) UpperCamelCase : str = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
style_context_codestyle: 40
label: 1

Row 5 · code:
from __future__ import annotations from collections import Counter from random import random class lowerCAmelCase_ : def __init__( self ) -> str: UpperCamelCase : List[Any] = {} def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None: UpperCamelCase : Tuple = {} def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None: if nodea not in self.connections: self.add_node(SCREAMING_SNAKE_CASE_ ) if nodea not in self.connections: self.add_node(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = probability def snake_case_ ( self ) -> list[str]: return list(self.connections ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str: UpperCamelCase : List[Any] = 0 UpperCamelCase : Tuple = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def UpperCamelCase ( snake_case__ : str , snake_case__ : list[tuple[str, str, float]] , snake_case__ : int ) -> dict[str, int]: UpperCamelCase : int = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(snake_case__ , snake_case__ , snake_case__ ) UpperCamelCase : List[str] = Counter(graph.get_nodes() ) UpperCamelCase : List[str] = start for _ in range(snake_case__ ): UpperCamelCase : List[Any] = graph.transition(snake_case__ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 40

style_context:
import math import random def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __UpperCAmelCase = 0.02 def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float: UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(snake_case__ ): # Forward propagation UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? UpperCamelCase : int = (expected / 100) - layer_a # Error delta UpperCamelCase : List[str] = layer_1_error * sigmoid_function(snake_case__ , snake_case__ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = int(input('''Expected value: ''')) __UpperCAmelCase = int(input('''Number of propagations: ''')) print(forward_propagation(expected, number_propagations))
style_context_codestyle: 40
label: 1

Row 6 · code:
import qiskit def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> qiskit.result.counts.Counts: UpperCamelCase : Optional[int] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register UpperCamelCase : int = qiskit.QuantumCircuit(snake_case__ , snake_case__ ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator UpperCamelCase : str = qiskit.execute(snake_case__ , snake_case__ , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(snake_case__ ) if __name__ == "__main__": print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
code_codestyle: 40

style_context:
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]: return EnvironmentCommand() class lowerCAmelCase_ ( a__ ): @staticmethod def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: UpperCamelCase : List[Any] = parser.add_parser('env' ) download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = huggingface_hub.__version__ UpperCamelCase : int = 'not installed' UpperCamelCase : Union[str, Any] = 'NA' if is_torch_available(): import torch UpperCamelCase : Any = torch.__version__ UpperCamelCase : str = torch.cuda.is_available() UpperCamelCase : Dict = 'not installed' if is_transformers_available(): import transformers UpperCamelCase : str = transformers.__version__ UpperCamelCase : Optional[Any] = 'not installed' if is_accelerate_available(): import accelerate UpperCamelCase : Dict = accelerate.__version__ UpperCamelCase : List[str] = 'not installed' if is_xformers_available(): import xformers UpperCamelCase : List[str] = xformers.__version__ UpperCamelCase : Dict = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""", 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(SCREAMING_SNAKE_CASE_ ) ) return info @staticmethod def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
style_context_codestyle: 40
label: 1

Row 7 · code:
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data) -> None:
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("build_tree should have returned from inside the loop")


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
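# Illustrative usage sketch (editor addition, not part of the original module):
# the traversals above can be exercised non-interactively by wiring TreeNode
# objects together instead of calling the input()-driven build_tree().
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     root.left.left, root.left.right = TreeNode(4), TreeNode(5)
#     pre_order(root)    # prints 1,2,4,5,3,
#     in_order(root)     # prints 4,2,5,1,3,
#     level_order(root)  # prints 1,2,3,4,5,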
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    # creates a random float32-style nested list
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}


class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # A string can be rearranged into a palindrome iff at most one character
    # occurs an odd number of times.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
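# Worked example (editor addition): "racecar" has character counts
# {r: 2, a: 2, c: 2, e: 1}; only one character has an odd count, so it can be
# rearranged into a palindrome (e.g. "carerac"). "abc" has three odd counts
# and cannot.
#
#     assert can_string_be_rearranged_as_palindrome_counter("racecar") is True
#     assert can_string_be_rearranged_as_palindrome("abc") is False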
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct: dict):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
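# Illustrative call (editor addition), assuming the definitions above:
#
#     dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     value_array = np.array([[0.1, 0.1]])
#     similarity_search(dataset, value_array)
#     # -> [[[0.0, 0.0], 0.1414213562373095]]  (nearest vector and its distance)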
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` accepts generators directly in TF2; `fit_generator` is deprecated
    classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
import os

import pytest
from attr import dataclass


SAGEMAKER_REGION = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
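# Illustrative usage (editor addition; follows the standard transformers
# config-then-model pattern, model class name assumed from the MGP-STR family):
#
#     from transformers import MgpstrConfig, MgpstrForSceneTextRecognition
#     configuration = MgpstrConfig()  # alibaba-damo/mgp-str-base style defaults
#     model = MgpstrForSceneTextRecognition(configuration)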
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient is the gradient of the normal to the ellipse
    # 4x^2 + y^2 = 100 at the point of incidence (tangent gradient is -4x/y).
    normal_gradient = point_y / 4 / point_x
    # s2 and c2 are sin(2*phi) and cos(2*phi), where phi is the angle of the normal
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
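# Note on the reflection step above (editor addition): if m_in is the incoming
# gradient and m_n = tan(phi) is the gradient of the normal, reflecting the ray
# across the normal rotates its direction by 2*phi, giving
#     m_out = (sin(2*phi) - cos(2*phi) * m_in) / (cos(2*phi) + sin(2*phi) * m_in)
# with sin(2*phi) = 2*m_n / (1 + m_n**2) and cos(2*phi) = (1 - m_n**2) / (1 + m_n**2),
# which is exactly the (s2, c2) expression used in next_point.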
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
40
1
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that maps a placeholder token to several learned vector tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
40
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def UpperCamelCase ( snake_case__ : Tuple="" ) -> str: UpperCamelCase : Union[str, Any] = tempfile.mkdtemp() return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> int: UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) # Ensure that the file contains the same value as the original tensor UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) ) def snake_case_ ( self ) -> Any: UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' ) sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 ) UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ ) @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) ) UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw(), Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type 
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = 'Hey!' UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() ) self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
40
1
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best value achievable from items[index:] within max_weight."""
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th lexicographic permutation of 0, 1, ..., n - 1 (factorial number system)."""
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation digit by digit
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
1
from typing import TYPE_CHECKING

from ....utils import _LazyModule

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
40
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase_ ( a__ ): def snake_case_ ( self ) -> Tuple: UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) ) class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any: UpperCamelCase : int = parent UpperCamelCase : int = batch_size UpperCamelCase : List[Any] = image_size UpperCamelCase : List[str] = patch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 ) UpperCamelCase : List[str] = hidden_act UpperCamelCase : Optional[int] = conv_kernel_size UpperCamelCase : List[str] = output_stride UpperCamelCase : Union[str, Any] = classifier_dropout_prob UpperCamelCase : List[Any] = use_labels UpperCamelCase : Any = is_training UpperCamelCase : int = num_labels UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Tuple = scope UpperCamelCase : List[str] = width_multiplier UpperCamelCase : Any = ffn_dropout UpperCamelCase : List[Any] = attn_dropout def snake_case_ ( self ) -> int: UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[str] = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels ) UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCamelCase : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def snake_case_ ( self ) -> int: return MobileViTVaConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Optional[int] = self.num_labels UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Any = self.num_labels UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : Tuple = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase__ : Any = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Dict = MobileViTVaModelTester(self ) UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def snake_case_ ( self ) -> int: pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def snake_case_ ( self ) -> str: pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> List[str]: UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : str = [*signature.parameters.keys()] UpperCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Tuple: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Tuple = outputs.hidden_states UpperCamelCase : Dict = 5 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCamelCase : Any = 2 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : Optional[int] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> str: UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) @slow def snake_case_ ( self ) -> Optional[Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Tuple: UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def snake_case_ ( self ) -> str: return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : 
Any = prepare_img() UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Union[str, Any] = prepare_img() UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ], device=SCREAMING_SNAKE_CASE_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Tuple = prepare_img() UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = outputs.logits.detach().cpu() UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] ) UpperCamelCase : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
40
1
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional

from packaging import version

if TYPE_CHECKING:
    from ... import PreTrainedTokenizer, TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging

logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}


class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see:
            # https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
40
def longest_distance(graph) -> None:
    """Print the longest path length in a DAG via Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
40
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : jnp.ndarray UpperCAmelCase__ : jnp.ndarray class lowerCAmelCase_ ( nn.Module ): UpperCAmelCase__ : int UpperCAmelCase__ : Tuple[int] = (16, 32, 96, 256) UpperCAmelCase__ : jnp.dtype = jnp.floataa def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Union[str, Any] = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) UpperCamelCase : str = [] for i in range(len(self.block_out_channels ) - 1 ): UpperCamelCase : Optional[Any] = self.block_out_channels[i] UpperCamelCase : Optional[Any] = self.block_out_channels[i + 1] UpperCamelCase : List[Any] = nn.Conv( SCREAMING_SNAKE_CASE_, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = nn.Conv( SCREAMING_SNAKE_CASE_, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = blocks UpperCamelCase : List[Any] = nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Optional[int] = self.conv_in(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = nn.silu(SCREAMING_SNAKE_CASE_ ) for block in self.blocks: UpperCamelCase : Any = block(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = nn.silu(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.conv_out(SCREAMING_SNAKE_CASE_ ) return embedding @flax_register_to_config class lowerCAmelCase_ ( nn.Module , a__ , a__ ): UpperCAmelCase__ : int = 32 UpperCAmelCase__ : int = 4 UpperCAmelCase__ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) UpperCAmelCase__ : Union[bool, Tuple[bool]] = False UpperCAmelCase__ : Tuple[int] = (320, 640, 1280, 1280) UpperCAmelCase__ : int = 2 UpperCAmelCase__ : Union[int, Tuple[int]] = 8 UpperCAmelCase__ : Optional[Union[int, Tuple[int]]] = None UpperCAmelCase__ : int = 1280 UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa UpperCAmelCase__ : bool = True UpperCAmelCase__ : int = 0 UpperCAmelCase__ : str = "rgb" UpperCAmelCase__ : Tuple[int] = (16, 32, 96, 256) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> FrozenDict: # init input tensors UpperCamelCase : Tuple = (1, self.in_channels, self.sample_size, self.sample_size) UpperCamelCase : List[str] = jnp.zeros(SCREAMING_SNAKE_CASE_, dtype=jnp.floataa ) UpperCamelCase : List[str] = jnp.ones((1,), dtype=jnp.intaa ) UpperCamelCase : Any = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa ) UpperCamelCase : Any = (1, 3, self.sample_size * 8, self.sample_size * 8) UpperCamelCase : Union[str, Any] = jnp.zeros(SCREAMING_SNAKE_CASE_, dtype=jnp.floataa ) UpperCamelCase , UpperCamelCase : Tuple = jax.random.split(SCREAMING_SNAKE_CASE_ ) 
UpperCamelCase : int = {'params': params_rng, 'dropout': dropout_rng} return self.init(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )["params"] def snake_case_ ( self ) -> Tuple: UpperCamelCase : Optional[int] = self.block_out_channels UpperCamelCase : Optional[int] = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. UpperCamelCase : List[str] = self.num_attention_heads or self.attention_head_dim # input UpperCamelCase : int = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time UpperCamelCase : List[str] = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) UpperCamelCase : Optional[int] = FlaxTimestepEmbedding(SCREAMING_SNAKE_CASE_, dtype=self.dtype ) UpperCamelCase : int = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) UpperCamelCase : str = self.only_cross_attention if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = (only_cross_attention,) * len(self.down_block_types ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = (num_attention_heads,) * len(self.down_block_types ) # down UpperCamelCase : Optional[int] = [] UpperCamelCase : Any = [] UpperCamelCase : str = block_out_channels[0] UpperCamelCase : str = nn.Conv( SCREAMING_SNAKE_CASE_, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ ) for i, down_block_type in enumerate(self.down_block_types ): UpperCamelCase : Any = output_channel UpperCamelCase : Optional[Any] = block_out_channels[i] UpperCamelCase : int = i == len(SCREAMING_SNAKE_CASE_ ) - 1 if down_block_type == "CrossAttnDownBlock2D": UpperCamelCase : Tuple = FlaxCrossAttnDownBlockaD( in_channels=SCREAMING_SNAKE_CASE_, out_channels=SCREAMING_SNAKE_CASE_, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: UpperCamelCase : List[str] = FlaxDownBlockaD( in_channels=SCREAMING_SNAKE_CASE_, out_channels=SCREAMING_SNAKE_CASE_, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(SCREAMING_SNAKE_CASE_ ) for _ in range(self.layers_per_block ): UpperCamelCase : Dict = nn.Conv( SCREAMING_SNAKE_CASE_, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ ) if not is_final_block: UpperCamelCase : str = nn.Conv( 
SCREAMING_SNAKE_CASE_, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = down_blocks UpperCamelCase : Any = controlnet_down_blocks # mid UpperCamelCase : Any = block_out_channels[-1] UpperCamelCase : Optional[int] = FlaxUNetMidBlockaDCrossAttn( in_channels=SCREAMING_SNAKE_CASE_, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) UpperCamelCase : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE_, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 1.0, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = False, ) -> Union[FlaxControlNetOutput, Tuple]: UpperCamelCase : List[str] = self.controlnet_conditioning_channel_order if channel_order == "bgr": UpperCamelCase : int = jnp.flip(SCREAMING_SNAKE_CASE_, axis=1 ) # 1. time if not isinstance(SCREAMING_SNAKE_CASE_, jnp.ndarray ): UpperCamelCase : Any = jnp.array([timesteps], dtype=jnp.intaa ) elif isinstance(SCREAMING_SNAKE_CASE_, jnp.ndarray ) and len(timesteps.shape ) == 0: UpperCamelCase : Tuple = timesteps.astype(dtype=jnp.floataa ) UpperCamelCase : Union[str, Any] = jnp.expand_dims(SCREAMING_SNAKE_CASE_, 0 ) UpperCamelCase : Optional[Any] = self.time_proj(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = self.time_embedding(SCREAMING_SNAKE_CASE_ ) # 2. pre-process UpperCamelCase : Tuple = jnp.transpose(SCREAMING_SNAKE_CASE_, (0, 2, 3, 1) ) UpperCamelCase : List[Any] = self.conv_in(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = jnp.transpose(SCREAMING_SNAKE_CASE_, (0, 2, 3, 1) ) UpperCamelCase : List[Any] = self.controlnet_cond_embedding(SCREAMING_SNAKE_CASE_ ) sample += controlnet_cond # 3. down UpperCamelCase : List[Any] = (sample,) for down_block in self.down_blocks: if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase , UpperCamelCase : Optional[int] = down_block(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, deterministic=not train ) else: UpperCamelCase , UpperCamelCase : List[str] = down_block(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, deterministic=not train ) down_block_res_samples += res_samples # 4. mid UpperCamelCase : List[Any] = self.mid_block(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, deterministic=not train ) # 5. contronet blocks UpperCamelCase : str = () for down_block_res_sample, controlnet_block in zip(SCREAMING_SNAKE_CASE_, self.controlnet_down_blocks ): UpperCamelCase : Any = controlnet_block(SCREAMING_SNAKE_CASE_ ) controlnet_down_block_res_samples += (down_block_res_sample,) UpperCamelCase : Dict = controlnet_down_block_res_samples UpperCamelCase : Optional[int] = self.controlnet_mid_block(SCREAMING_SNAKE_CASE_ ) # 6. scaling UpperCamelCase : List[str] = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=SCREAMING_SNAKE_CASE_, mid_block_res_sample=SCREAMING_SNAKE_CASE_ )
40
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
40
1
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = '''src/transformers''' __UpperCAmelCase = '''docs/source/en/tasks''' def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]: with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: UpperCamelCase : Optional[Any] = f.readlines() # Find the start prompt. UpperCamelCase : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 UpperCamelCase : Optional[Any] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]: UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide] UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) UpperCamelCase : Tuple = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , ) UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ' to fix this.' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
40
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
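The `is_*_available()` guards used throughout these files reduce to probing whether a module is importable. A hedged sketch of one way to write such a probe (the real `transformers.utils` helpers also check versions and cache the result):

import importlib.util


def is_available(module_name: str) -> bool:
    """True if `module_name` is importable in the current environment."""
    return importlib.util.find_spec(module_name) is not None


for name in ("sentencepiece", "tokenizers", "torch"):
    print(name, "->", "available" if is_available(name) else "not installed")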
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
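For intuition about the classifier-free-guidance branch above: a single learned "unconditional" embedding is broadcast across the batch and concatenated in front of the real image embeddings, doubling the batch. A standalone shape check with made-up dimensions (not the actual model weights):

import torch
from torch import nn

batch, clip_dim = 2, 768
image_embeddings = torch.randn(batch, clip_dim)

# one learned embedding shared by every unconditional example
learned_uncond = nn.Parameter(torch.zeros(clip_dim))
uncond = learned_uncond.unsqueeze(0).expand(batch, -1)

# unconditional rows first, conditional rows second -> batch doubles
guided = torch.cat([uncond, image_embeddings], dim=0)
print(guided.shape)  # torch.Size([4, 768])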
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
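A usage sketch for a wrapper like this, with a plain PyTorch optimizer and scheduler standing in (illustrative only; in practice `accelerate` builds the wrapper for you via `Accelerator.prepare`):

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

for step in range(20):
    loss = model(torch.randn(8, 4)).sum()
    loss.backward()
    optimizer.step()      # the wrapper only steps the scheduler if this actually ran
    optimizer.zero_grad()
    scheduler.step()      # stands in for AcceleratedScheduler.step()

print(scheduler.get_last_lr())  # [0.025] after two decays at steps 10 and 20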
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __UpperCAmelCase = random.Random() def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any: if rng is None: UpperCamelCase : int = global_rng UpperCamelCase : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]: UpperCamelCase : Dict = parent UpperCamelCase : Dict = batch_size UpperCamelCase : Any = min_seq_length UpperCamelCase : Optional[int] = max_seq_length UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Tuple = feature_size UpperCamelCase : Any = padding_value UpperCamelCase : Tuple = sampling_rate UpperCamelCase : Optional[Any] = return_attention_mask UpperCamelCase : Optional[Any] = do_normalize def snake_case_ ( self ) -> Union[str, Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]: def _flatten(SCREAMING_SNAKE_CASE_ ): return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Union[str, Any] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] return speech_inputs class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Any = WavaVecaFeatureExtractor def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) ) def snake_case_ ( self ) -> Optional[int]: # Tests that all call wrap to encode_plus and batch_encode_plus UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, 
SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test batched UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : Any = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Tuple: UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Tuple = range(800, 1400, 200 ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths] UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : List[str] = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : int = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 
200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' ) UpperCamelCase : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' ) UpperCamelCase : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def snake_case_ ( self ) -> str: import torch UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : Dict = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def snake_case_ ( self ) -> Tuple: # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def A_( A : str , A : Optional[int] , A : List[str]): return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def A_( A : List[str] , A : List[Any] , A : List[str] , A : Union[str, Any]="attention"): UpperCamelCase = UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :]) UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2]) UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :]) UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2]) UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :]) UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2]) UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :]) UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2]) return k, o, q, v def A_( A : List[Any] , A : Dict , A : str , A : Optional[Any]=False): if split_mlp_wi: UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] UpperCamelCase = (wi_a, wi_a) else: UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def A_( A : List[Any] , A : Union[str, Any] , A : Optional[Any] , A : Optional[int]): return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def A_( A : dict , *, A : int , A : bool , A : bool = False): UpperCamelCase = traverse_util.flatten_dict(variables['target']) UpperCamelCase = {'/'.join(A): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , A) UpperCamelCase = collections.OrderedDict() # Shared embeddings. UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(A): # Block i, layer 0 (Self Attention). UpperCamelCase = tax_layer_norm_lookup(A , A , 'encoder' , 'pre_attention_layer_norm') UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(A , A , 'encoder' , 'attention') UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 1 (MLP). UpperCamelCase = tax_layer_norm_lookup(A , A , 'encoder' , 'pre_mlp_layer_norm') UpperCamelCase , UpperCamelCase = tax_mlp_lookup(A , A , 'encoder' , A) UpperCamelCase = layer_norm if split_mlp_wi: UpperCamelCase = wi[0].T UpperCamelCase = wi[1].T else: UpperCamelCase = wi.T UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCamelCase = tax_relpos_bias_lookup( A , A , 'encoder').T UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: UpperCamelCase = tax_relpos_bias_lookup( A , 0 , 'encoder').T UpperCamelCase = tax_relpos_bias_lookup( A , 0 , 'decoder').T if not is_encoder_only: # Decoder. for i in range(A): # Block i, layer 0 (Self Attention). 
UpperCamelCase = tax_layer_norm_lookup(A , A , 'decoder' , 'pre_self_attention_layer_norm') UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(A , A , 'decoder' , 'self_attention') UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). UpperCamelCase = tax_layer_norm_lookup(A , A , 'decoder' , 'pre_cross_attention_layer_norm') UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(A , A , 'decoder' , 'encoder_decoder_attention') UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 2 (MLP). UpperCamelCase = tax_layer_norm_lookup(A , A , 'decoder' , 'pre_mlp_layer_norm') UpperCamelCase , UpperCamelCase = tax_mlp_lookup(A , A , 'decoder' , A) UpperCamelCase = layer_norm if split_mlp_wi: UpperCamelCase = wi[0].T UpperCamelCase = wi[1].T else: UpperCamelCase = wi.T UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCamelCase = tax_relpos_bias_lookup(A , A , 'decoder').T UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def A_( A : Any , A : bool): UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.') UpperCamelCase = state_dict['shared.weight'] return state_dict def A_( A : List[str] , A : Optional[Any] , A : str , A : int , A : List[Any]): UpperCamelCase = checkpoints.load_tax_checkpoint(A) UpperCamelCase = convert_tax_to_pytorch( A , num_layers=config.num_layers , is_encoder_only=A , scalable_attention=A) UpperCamelCase = make_state_dict(A , A) model.load_state_dict(A , strict=A) def A_( A : Optional[Any] , A : List[str] , A : List[Any] , A : bool = False , A : bool = False , ): UpperCamelCase = MTaConfig.from_json_file(A) print(f'''Building PyTorch model from configuration: {config}''') # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCamelCase = UMTaEncoderModel(A) else: UpperCamelCase = UMTaForConditionalGeneration(A) # Load weights from tf checkpoint load_tax_weights_in_ta(A , A , A , A , A) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''') model.save_pretrained(A) # Verify that we can load the checkpoint. model.from_pretrained(A) print('Done') if __name__ == "__main__": lowerCAmelCase : Optional[Any] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' 
) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) lowerCAmelCase : Dict = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
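Quick sanity checks for the converter, using the cleaned-up name `decimal_to_binary` from above:

assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(40) == "0b101000"
assert decimal_to_binary(-5) == "-0b101"
assert decimal_to_binary(1234) == bin(1234)  # matches the builtin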
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _SCREAMING_SNAKE_CASE (): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join lowerCAmelCase = '__test_patch_submodule_mock__' with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _SCREAMING_SNAKE_CASE (): assert _test_patching.open is open lowerCAmelCase = '__test_patch_submodule_builtin_mock__' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , 'open' , _UpperCAmelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _SCREAMING_SNAKE_CASE (): # pandas.read_csv is not present in _test_patching lowerCAmelCase = '__test_patch_submodule_missing_mock__' with patch_submodule(_test_patching , 'pandas.read_csv' , _UpperCAmelCase ): pass def _SCREAMING_SNAKE_CASE (): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point lowerCAmelCase = '__test_patch_submodule_missing_builtin_mock__' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , 'len' , _UpperCAmelCase ) is None with patch_submodule(_test_patching , 'len' , _UpperCAmelCase ): assert _test_patching.len is mock assert _test_patching.len is len def _SCREAMING_SNAKE_CASE (): 
lowerCAmelCase = '__test_patch_submodule_start_and_stop_mock__' lowerCAmelCase = patch_submodule(_test_patching , 'open' , _UpperCAmelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _SCREAMING_SNAKE_CASE (): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join lowerCAmelCase = '__test_patch_submodule_successive_join__' lowerCAmelCase = '__test_patch_submodule_successive_dirname__' lowerCAmelCase = '__test_patch_submodule_successive_rename__' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '__test_patch_submodule_doesnt_exist_mock__' with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , _UpperCAmelCase ): pass with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , _UpperCAmelCase ): pass
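These tests exercise `datasets.utils.patching.patch_submodule`. The core idea, temporarily swapping an attribute and restoring it on exit, fits in a few lines of standard library; this `patch_attr` is a simplification (the real helper also walks dotted paths and patches renamed imports):

import os
from contextlib import contextmanager


@contextmanager
def patch_attr(obj, name, new_value):
    sentinel = object()
    old_value = getattr(obj, name, sentinel)
    setattr(obj, name, new_value)
    try:
        yield
    finally:
        if old_value is sentinel:
            delattr(obj, name)  # the attribute did not exist before
        else:
            setattr(obj, name, old_value)


with patch_attr(os.path, "join", lambda *parts: "MOCK(" + ",".join(parts) + ")"):
    print(os.path.join("a", "b"))  # MOCK(a,b)
print(os.path.join("a", "b"))      # the real os.path.join again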
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __UpperCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: UpperCamelCase : List[Any] = tensor_name.split('.' ) for split in splits[:-1]: UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) UpperCamelCase : Dict = new_module UpperCamelCase : int = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) UpperCamelCase : Union[str, Any] = tensor_name in module._buffers UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) UpperCamelCase : Optional[Any] = False UpperCamelCase : str = False if is_buffer or not is_bitsandbytes_available(): UpperCamelCase : List[str] = False UpperCamelCase : Tuple = False else: UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCamelCase : List[Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCamelCase : Dict = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[Any] = value.to('cpu' ) if value.dtype == torch.inta: UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: UpperCamelCase : Union[str, Any] = new_value.T UpperCamelCase : Union[str, Any] = old_value.__dict__ if is_abit: UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) UpperCamelCase : Dict = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[str] = value.to(snake_case__ ) else: UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: UpperCamelCase : Optional[int] = new_value else: UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) UpperCamelCase : List[str] = new_value def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int: for name, module in model.named_children(): if current_key_name is None: UpperCamelCase : str = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape else: UpperCamelCase : Any = module.in_features UpperCamelCase : List[str] = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCamelCase : Any = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCamelCase : Optional[int] = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCamelCase : str = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCamelCase : int = True # Store the module class in case we need to transpose the weight later UpperCamelCase : Any = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]: UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , 
snake_case__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]: UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCamelCase : List[str] = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] ) UpperCamelCase : Optional[int] = len(snake_case__ ) > 0 # Check if it is a base model UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCamelCase : List[Any] = list(model.named_children() ) UpperCamelCase : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ ) UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys UpperCamelCase : Tuple = ['.weight', '.bias'] UpperCamelCase : Tuple = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
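The traversal in `_replace_with_bnb_linear` generalizes: walk `named_children`, swap matching leaf modules, recurse into the rest. A bitsandbytes-free sketch with a placeholder class (purely illustrative, not the actual quantization logic):

import torch.nn as nn


class StubQuantLinear(nn.Linear):
    """Placeholder standing in for a quantized linear layer."""


def replace_linears(model, skip=frozenset(), prefix=""):
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            # swap the leaf in place, preserving its dimensions
            model._modules[name] = StubQuantLinear(
                child.in_features, child.out_features, bias=child.bias is not None
            )
        else:
            replace_linears(child, skip, full_name)
    return model


net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Sequential(nn.Linear(8, 2)))
print(replace_linears(net))  # both Linear leaves are now StubQuantLinear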
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _lowercase : Optional[Any] = DiTPipeline _lowercase : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _lowercase : List[Any] = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _lowercase : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _lowercase : Optional[int] = False def _lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowercase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_lowercase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def _lowercase ( self , _lowercase , _lowercase=0 ): """simple docstring""" if str(_lowercase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(_lowercase ) else: _lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = self.get_dummy_inputs(_lowercase ) _lowerCAmelCase = pipe(**_lowercase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowercase , 1e-3 ) def _lowercase ( self ): """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=_lowercase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _lowercase ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", 
"""umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(_lowercase ) _lowerCAmelCase = pipe(_lowercase , generator=_lowercase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(_lowercase , _lowercase ): _lowerCAmelCase = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1e-2 def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(_lowercase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(_lowercase , generator=_lowercase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(_lowercase , _lowercase ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1e-1
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCamelCase ( snake_case__ : int ) -> Dict: UpperCamelCase : Optional[Any] = tmp_path / 'file.csv' UpperCamelCase : Optional[Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]: UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv' UpperCamelCase : Any = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str: UpperCamelCase : Any = tmp_path / 'csv_with_image.csv' UpperCamelCase : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple: UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv' UpperCamelCase : Dict = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Dict ) -> List[str]: UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv' UpperCamelCase : Union[str, Any] = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]: UpperCamelCase : str = Csv() UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(snake_case__ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(snake_case__ ) in record.message for record in caplog.records ) @require_pil def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : List[str] = f.read().splitlines()[1] UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] ) UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() UpperCamelCase : str = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCamelCase ( snake_case__ : Any ) -> str: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : Any = f.read().splitlines()[1:] UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] ) UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() UpperCamelCase : List[str] = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels] def UpperCamelCase ( snake_case__ : str ) -> 
List[Any]: UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} ) UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] ) UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) UpperCamelCase : str = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
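The `converters` mapping in the last test is a `pandas.read_csv` option that the `datasets` Csv builder forwards. A small standalone illustration of the same parsing:

import io

import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n"
df = pd.read_csv(
    io.StringIO(csv_text),
    converters={"int_list": lambda cell: [int(tok) for tok in cell.split()]},
)
print(df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6]]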
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
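The version guards rely on `packaging.version.parse` rather than plain string comparison, which breaks once version components reach double digits:

from packaging import version

# naive string comparison gets this wrong: "1" < "7" lexicographically
print("3.10" < "3.7")                                # True  (wrong)
print(version.parse("3.10") < version.parse("3.7"))  # False (right)

print(version.parse("8.0.0").major >= 8)             # True, as used above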
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # random weight in {1, 3, ..., 199}
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
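Because sigmoid'(x) = s(1 - s) where s = sigmoid(x), the `deriv=True` branch takes the already-computed activation as its input. A numeric check, plus a sample run (convergence is stochastic, so treat the output as approximate; assumes `forward_propagation` from above is in scope):

import math

s = 1 / (1 + math.exp(-0.5))
print(round(s * (1 - s), 4))  # 0.235, the sigmoid derivative at x = 0.5

# train toward an expected value of 32 over many updates
print(forward_propagation(32, 450_000))  # typically lands close to 32.0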
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _snake_case ( _snake_case : Any , _snake_case : str=7 ) -> Optional[Any]: '''simple docstring''' _A = None if token is not None: _A = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''} # The id of a workflow (not of a workflow run) _A = '636036' _A = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' _A = requests.get(_snake_case , headers=_snake_case ).json() return result["workflow_runs"] def _snake_case ( _snake_case : List[str] ) -> Optional[int]: '''simple docstring''' _A = get_daily_ci_runs(_snake_case ) _A = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _A = workflow_run['id'] break return workflow_run_id def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Any ) -> Dict: '''simple docstring''' _A = get_last_daily_ci_runs(_snake_case ) if workflow_run_id is not None: _A = get_artifacts_links(worflow_run_id=_snake_case , token=_snake_case ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _A = artifacts_links[artifact_name] download_artifact( artifact_name=_snake_case , artifact_url=_snake_case , output_dir=_snake_case , token=_snake_case ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple ) -> str: '''simple docstring''' get_last_daily_ci_artifacts(_snake_case , _snake_case , _snake_case ) _A = {} for artifact_name in artifact_names: _A = os.path.join(_snake_case , F'''{artifact_name}.zip''' ) if os.path.isfile(_snake_case ): _A = {} with zipfile.ZipFile(_snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(_snake_case ): # read the file with z.open(_snake_case ) as f: _A = f.read().decode('UTF-8' ) return results
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
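`format_dict` simply renders the mapping as a bulleted block; for instance, with illustrative values:

info = {
    "`diffusers` version": "0.18.0",  # illustrative values only
    "Platform": "Linux-5.15.0-x86_64",
    "Python version": "3.10.12",
}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))
# - `diffusers` version: 0.18.0
# - Platform: Linux-5.15.0-x86_64
# - Python version: 3.10.12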
'''simple docstring''' # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file lowercase__ : Optional[Any] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def _lowerCAmelCase ( __snake_case : str=None ) -> int: if subparsers is not None: __A : Tuple = subparsers.add_parser('tpu-config' , description=_description ) else: __A : Dict = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description ) # Core arguments __A : Dict = parser.add_argument_group( 'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' ) config_args.add_argument( '--config_file' , type=__snake_case , default=__snake_case , help='Path to the config file to use for accelerate.' , ) config_args.add_argument( '--tpu_name' , default=__snake_case , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , ) config_args.add_argument( '--tpu_zone' , default=__snake_case , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , ) __A : Optional[Any] = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' ) pod_args.add_argument( '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , ) pod_args.add_argument( '--command_file' , default=__snake_case , help='The path to the file containing the commands to run on the pod on startup.' , ) pod_args.add_argument( '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , ) pod_args.add_argument( '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , ) pod_args.add_argument( '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , ) pod_args.add_argument( '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' ) if subparsers is not None: parser.set_defaults(func=__snake_case ) return parser def _lowerCAmelCase ( __snake_case : Dict ) -> Optional[int]: __A : Tuple = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(__snake_case ): __A : Tuple = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: __A : Optional[int] = defaults.command_file if not args.command and defaults.commands is not None: __A : List[str] = defaults.commands if not args.tpu_name: __A : Tuple = defaults.tpu_name if not args.tpu_zone: __A : Union[str, Any] = defaults.tpu_zone if args.accelerate_version == "dev": __A : Union[str, Any] = 'git+https://github.com/huggingface/accelerate.git' elif args.accelerate_version == "latest": __A : Optional[Any] = 'accelerate -U' elif isinstance(parse(args.accelerate_version ) , __snake_case ): __A : Tuple = f'accelerate=={args.accelerate_version}' if not args.command_file and not args.command: raise ValueError('You must specify either a command file or a command to run on the pod.' ) if args.command_file: with open(args.command_file , 'r' ) as f: __A : str = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , __snake_case ): __A : int = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate __A : Any = ['cd /usr/share'] if args.install_accelerate: new_cmd += [f'pip install {args.accelerate_version}'] new_cmd += args.command __A : List[str] = '; '.join(__snake_case ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess __A : Dict = ['gcloud'] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f'Running {" ".join(__snake_case )}' ) return subprocess.run(__snake_case ) print('Successfully setup pod.' ) def _lowerCAmelCase ( ) -> List[str]: __A : int = tpu_command_parser() __A : Union[str, Any] = parser.parse_args() tpu_command_launcher(__snake_case )
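A short sketch of the command assembly done inside the tpu-config launcher above, with the two pieces pulled out into standalone functions so they can be tested without gcloud. The function names are hypothetical; only the "cd /usr/share; ..." joining and the gcloud argv layout mirror the source.

def build_pod_command(commands, install_accelerate=False, accelerate_spec="accelerate -U"):
    # Default to the shared folder, optionally install accelerate, then run user commands
    new_cmd = ["cd /usr/share"]
    if install_accelerate:
        new_cmd.append(f"pip install {accelerate_spec}")
    new_cmd.extend(commands)
    return "; ".join(new_cmd)


def gcloud_ssh_argv(tpu_name, tpu_zone, command, use_alpha=False):
    argv = ["gcloud"]
    if use_alpha:
        argv.append("alpha")
    argv += ["compute", "tpus", "tpu-vm", "ssh", tpu_name,
             "--zone", tpu_zone, "--command", command, "--worker", "all"]
    return argv


if __name__ == "__main__":
    pod_cmd = build_pod_command(["accelerate launch train.py"], install_accelerate=True)
    # Equivalent of --debug: print the command instead of subprocess.run(...)
    print(" ".join(gcloud_ssh_argv("my-tpu", "us-central1-b", pod_cmd)))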
8
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = '''▁''' __UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } __UpperCAmelCase = { '''facebook/xglm-564M''': 2_048, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None: UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer UpperCamelCase : Any = 7 UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase : int = 1 # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} UpperCamelCase : Optional[int] = len(self.sp_model ) UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: UpperCamelCase : int = self.__dict__.copy() UpperCamelCase : Union[str, Any] = None UpperCamelCase : int = self.sp_model.serialized_model_proto() return state def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str: UpperCamelCase : Any = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): UpperCamelCase : Any = {} UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a UpperCamelCase : Optional[int] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : str = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def snake_case_ ( self ) -> int: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def snake_case_ ( self ) -> int: UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip() return out_string def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) 
return UpperCamelCase : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi: UpperCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
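The fairseq-offset arithmetic in the tokenizer above is easy to miss in passing; the sketch below isolates it, with a plain dict standing in for the SentencePiece model. The toy vocabulary and variable names are illustrative only.

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # spm id 3 (",") maps to fairseq id 4

spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}  # toy spm vocab
unk_token_id = fairseq_tokens_to_ids["<unk>"]


def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm returns 0 for unknown pieces, so fall back to the unk id
    return spm_id + fairseq_offset if spm_id else unk_token_id


assert token_to_id("<pad>") == 1   # handled by the fairseq table
assert token_to_id(",") == 4       # spm id 3 shifted by the offset
assert token_to_id("xyzzy") == 3   # unknown piece -> <unk>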
40
0
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
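The file above is one instance of a general deprecation-shim pattern: subclass the replacement, warn in __init__, delegate everything else. A minimal standalone version, with class names invented for the example:

import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    """Deprecation shim: behaves exactly like NewProcessor but warns on use."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OldFeatureExtractor(size=128)
        assert any(issubclass(w.category, FutureWarning) for w in caught)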
9
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCAmelCase = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } __UpperCAmelCase = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : str = ["input_ids", "attention_mask"] UpperCAmelCase__ : Dict = RobertaTokenizer def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]: super().__init__( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, 
errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) ) UpperCamelCase : List[str] = add_prefix_space UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = add_prefix_space UpperCamelCase : Optional[Any] = 'post_processor' UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase : Optional[Any] = tuple(state['sep'] ) if "cls" in state: UpperCamelCase : Optional[int] = tuple(state['cls'] ) UpperCamelCase : Any = False if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Optional[int] = add_prefix_space UpperCamelCase : List[Any] = True if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets: UpperCamelCase : Dict = trim_offsets UpperCamelCase : Union[str, Any] = True if changes_to_apply: UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) ) UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) @property def snake_case_ ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value UpperCamelCase : List[Any] = value def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple: UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : Dict = [self.sep_token_id] UpperCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
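The special-token layout built by build_inputs_with_special_tokens and the all-zero token type ids at the end of the tokenizer above can be checked in isolation. A minimal sketch with hard-coded ids (0 = <s>, 2 = </s>, matching RoBERTa's usual ids):

bos_token_id, eos_token_id = 0, 2


def build_inputs_with_special_tokens(ids_a, ids_b=None):
    # RoBERTa-style: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair
    output = [bos_token_id] + ids_a + [eos_token_id]
    if ids_b is None:
        return output
    return output + [eos_token_id] + ids_b + [eos_token_id]


def create_token_type_ids(ids_a, ids_b=None):
    # RoBERTa does not use token type ids, so everything is zero
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + len(ids_b) + 4)


pair = build_inputs_with_special_tokens([10, 11], [20])
assert pair == [0, 10, 11, 2, 2, 20, 2]
assert len(create_token_type_ids([10, 11], [20])) == len(pair)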
40
0
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, class_names, point, k=5):
    # Pair each training sample with its label
    data = zip(train_data, train_target)
    # List of (distance, label) pairs for all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choose the k points with the smallest distances
    votes = [i[1] for i in sorted(distances)[:k]]
    # The most commonly occurring class among them is the prediction
    result = Counter(votes).most_common(1)[0][0]
    return class_names[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
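For reference, the same k-nearest-neighbours vote can be written without an explicit Python loop over samples. This vectorized variant is a sketch, not part of the file above; the toy data is invented for the assertion.

from collections import Counter

import numpy as np


def knn_predict(train_x, train_y, point, k=5):
    # One norm over the whole training set instead of a per-sample loop
    dists = np.linalg.norm(np.asarray(train_x) - np.asarray(point), axis=1)
    nearest = np.argsort(dists)[:k]
    return Counter(np.asarray(train_y)[nearest].tolist()).most_common(1)[0][0]


# toy check: the point (0, 0) is closest to the class-0 samples
X_toy = [[0.1, 0.0], [0.0, 0.2], [5.0, 5.0], [5.1, 4.9]]
y_toy = [0, 0, 1, 1]
assert knn_predict(X_toy, y_toy, [0.0, 0.0], k=3) == 0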
10
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple: super().__init__(features=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = torch_tensor_kwargs import torch # noqa import torch at initialization def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict: import torch if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column: if all( isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any: import torch if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ): return value elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ): return value.tolist() UpperCamelCase : str = {} if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ): UpperCamelCase : List[str] = {'dtype': torch.intaa} elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ): UpperCamelCase : int = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ): UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ ) return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: import torch # support for torch, tf, jax etc. 
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ): UpperCamelCase : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] ) elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ): return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] ) return self._tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int: return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ ) return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor": UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] ) UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for column_name in batch: UpperCamelCase : str = self._consolidate(batch[column_name] ) return batch
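The formatter above interleaves two ideas: recursive conversion of nested structures and consolidation of equally-shaped tensors via torch.stack. A miniature version of both, without Arrow or PIL; it assumes torch and numpy are installed, and the function name is illustrative.

import numpy as np
import torch


def recursive_tensorize(data):
    """Recursively convert nested dicts/lists/ndarrays into torch tensors,
    stacking lists whose elements share a shape and dtype."""
    if isinstance(data, dict):
        return {k: recursive_tensorize(v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        converted = [recursive_tensorize(v) for v in data]
        if converted and all(
            isinstance(t, torch.Tensor)
            and t.shape == converted[0].shape
            and t.dtype == converted[0].dtype
            for t in converted
        ):
            return torch.stack(converted)
        return converted
    if isinstance(data, np.ndarray):
        return torch.tensor(data)
    return torch.tensor(data) if isinstance(data, (int, float)) else data


batch = {"ids": [[1, 2], [3, 4]], "score": 0.5, "name": "row"}
out = recursive_tensorize(batch)
assert out["ids"].shape == (2, 2) and out["name"] == "row"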
40
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class __A ( metaclass=A ): '''simple docstring''' __lowerCamelCase : Optional[int] = ['torch', 'transformers', 'onnx'] def __init__(self , *A , **A ) -> Any: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> int: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> Any: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class __A ( metaclass=A ): '''simple docstring''' __lowerCamelCase : str = ['torch', 'transformers', 'onnx'] def __init__(self , *A , **A ) -> Any: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> int: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> Any: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class __A ( metaclass=A ): '''simple docstring''' __lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__(self , *A , **A ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> Dict: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> int: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class __A ( metaclass=A ): '''simple docstring''' __lowerCamelCase : str = ['torch', 'transformers', 'onnx'] def __init__(self , *A , **A ) -> str: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> int: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class __A ( metaclass=A ): '''simple docstring''' __lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__(self , *A , **A ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> Any: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class __A ( metaclass=A ): '''simple docstring''' __lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__(self , *A , **A ) -> List[str]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a__ (cls , *A , **A ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
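Every class in the file above follows the same dummy-object recipe: a metaclass that raises on any attribute access, plus an __init__ that raises on instantiation. A self-contained sketch of the mechanism; the placeholder class name is invented, and requires_backends is re-implemented here in simplified form.

class DummyObject(type):
    """Metaclass that makes a placeholder class raise on any class-level use."""

    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class OnnxPipelinePlaceholder(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


try:
    OnnxPipelinePlaceholder()
except ImportError as e:
    print(e)  # OnnxPipelinePlaceholder requires the following backends: torch, transformers, onnx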
11
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # Keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
0
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = """ylacombe/bark-small""" lowercase__ : Dict = tempfile.mkdtemp() lowercase__ : Any = """en_speaker_1""" lowercase__ : Optional[int] = """This is a test string""" lowercase__ : Tuple = """speaker_embeddings_path.json""" lowercase__ : str = """speaker_embeddings""" def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.tmpdirname) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.get_tokenizer() lowercase__ : int = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_) processor.save_pretrained(self.tmpdirname) lowercase__ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowercase__ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") lowercase__ : Any = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowercase__ : Optional[int] = 35 lowercase__ : Tuple = 2 lowercase__ : Dict = 8 lowercase__ : Optional[int] = { """semantic_prompt""": np.ones(SCREAMING_SNAKE_CASE_), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len)), """fine_prompt""": np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset lowercase__ : Tuple = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([])).tolist()) # test loading voice preset from npz file lowercase__ : List[Any] = os.path.join(self.tmpdirname , """file.npz""") np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : str = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([])).tolist()) # test loading voice preset from the hub lowercase__ : int = processor(text=self.input_string , voice_preset=self.voice_preset) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.get_tokenizer() lowercase__ : str = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = 
processor(text=self.input_string) lowercase__ : List[str] = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
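The voice-preset round trip exercised in the test above boils down to np.savez / np.load on a dict of arrays. A standalone check of just that mechanism:

import os
import tempfile

import numpy as np

seq_len, nb_coarse, nb_total = 35, 2, 8
voice_preset = {
    "semantic_prompt": np.ones(seq_len),
    "coarse_prompt": np.ones((nb_coarse, seq_len)),
    "fine_prompt": np.ones((nb_total, seq_len)),
}

path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **voice_preset)

loaded = np.load(path)
for key in voice_preset:
    assert np.array_equal(voice_preset[key], loaded[key])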
12
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN (Sequential: building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # fit_generator is deprecated in TF2; Model.fit accepts generators directly
    classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
40
0
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string is exactly four dot-separated numeric octets,
    each in the valid IPv4 range 0-255 (the original upper bound of 254
    wrongly rejected addresses such as 10.0.0.255)."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
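Outside of an exercise, the standard library already covers this check. A sketch using ipaddress, which is stricter than the manual version (it also rejects leading zeros and non-numeric octets):

from ipaddress import AddressValueError, IPv4Address


def is_valid_ipv4(ip: str) -> bool:
    try:
        IPv4Address(ip)
        return True
    except AddressValueError:
        return False


assert is_valid_ipv4("192.168.0.1")
assert not is_valid_ipv4("256.1.1.1")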
13
import os import pytest from attr import dataclass __UpperCAmelCase = '''us-east-1''' # defaults region @dataclass class lowerCAmelCase_ : UpperCAmelCase__ : str UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role" UpperCAmelCase__ : Union[str, Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000} @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def snake_case_ ( self ) -> str: return F"""{self.framework}-transfromers-test""" @property def snake_case_ ( self ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]: UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
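The Regex entries in the metric definitions above are what SageMaker applies line-by-line to the training logs. The same extraction can be tried locally; the sample log line below is invented for illustration.

import re

metric_definitions = [
    {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
    {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
]

log_line = "train_runtime = 123.4"
for metric in metric_definitions:
    m = re.search(metric["Regex"], log_line, flags=re.MULTILINE)
    if m:
        print(metric["Name"], "->", m.group(1))  # train_runtime -> 123.4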
40
0
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
14
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = '''src/transformers''' __UpperCAmelCase = '''docs/source/en/tasks''' def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]: with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: UpperCamelCase : Optional[Any] = f.readlines() # Find the start prompt. UpperCamelCase : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 UpperCamelCase : Optional[Any] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]: UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide] UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) UpperCamelCase : Tuple = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , ) UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ' to fix this.' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
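The _find_text_in_file helper above is a generic between-two-markers scan. A compact standalone version, with a toy document to show the returned indices (names and toy data invented for the example):

def find_text_between(lines, start_prompt, end_prompt):
    """Return (text, start, end): the block between the first line starting
    with start_prompt and the next line starting with end_prompt."""
    start = 0
    while not lines[start].startswith(start_prompt):
        start += 1
    start += 1
    end = start
    while not lines[end].startswith(end_prompt):
        end += 1
    return "".join(lines[start:end]), start, end


lines = ["intro\n", "<!--start-->\n", "model list\n", "<!--end-->\n"]
text, s, e = find_text_between(lines, "<!--start-->", "<!--end-->")
assert text == "model list\n" and (s, e) == (2, 3)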
40
0
ROMAN = [
    (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
    (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
    (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means subtraction (e.g. IV = 4)
        if place + 1 < len(roman) and vals[roman[place]] < vals[roman[place + 1]]:
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
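A quick property check using the two functions defined above: every integer in the supported range (1 to 3999) must survive the round trip.

assert int_to_roman(3549) == "MMMDXLIX"
assert roman_to_int("MMMDXLIX") == 3549
assert all(roman_to_int(int_to_roman(n)) == n for n in range(1, 4000))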
15
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : int = IFPipeline UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} def snake_case_ ( self ) -> str: return self._get_dummy_components() def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case_ ( self ) -> Optional[int]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' ) def snake_case_ ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def snake_case_ ( self ) -> Dict: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def snake_case_ ( self ) -> Optional[int]: self._test_save_load_local() def snake_case_ ( self ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def snake_case_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self ) -> List[Any]: # if UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa ) UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCamelCase : int = None UpperCamelCase : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components ) UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components ) UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 UpperCamelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Tuple = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) 
UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : List[Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) 
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
40
0
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __A : List[Any] = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = question_encoder SCREAMING_SNAKE_CASE = generator SCREAMING_SNAKE_CASE = self.question_encoder def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] ): if os.path.isfile(__lowerCamelCase ): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "question_encoder_tokenizer" ) SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "generator_tokenizer" ) self.question_encoder.save_pretrained(__lowerCamelCase ) self.generator.save_pretrained(__lowerCamelCase ) @classmethod def _snake_case ( cls : Tuple , __lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer SCREAMING_SNAKE_CASE = kwargs.pop("config" , __lowerCamelCase ) if config is None: SCREAMING_SNAKE_CASE = RagConfig.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( __lowerCamelCase , config=config.question_encoder , subfolder="question_encoder_tokenizer" ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( __lowerCamelCase , config=config.generator , subfolder="generator_tokenizer" ) return cls(question_encoder=__lowerCamelCase , generator=__lowerCamelCase ) def __call__( self : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ): return self.current_tokenizer(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ): return self.generator.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : List[str] , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[int] ): return self.generator.decode(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.question_encoder def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.generator def _snake_case ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "longest" , __lowerCamelCase : str = None , __lowerCamelCase : bool = True , **__lowerCamelCase : List[str] , ): warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. 
See the documentation of your specific tokenizer for more " "details" , __lowerCamelCase , ) if max_length is None: SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length SCREAMING_SNAKE_CASE = self( __lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length SCREAMING_SNAKE_CASE = self( text_target=__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = labels["input_ids"] return model_inputs
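Stripped of saving/loading and the deprecated prepare_seq2seq_batch, the class above is a delegate: two sub-tokenizers and a "current" pointer that __call__ routes through. A toy sketch of that pattern; the method names as_input_tokenizer/as_target_tokenizer are invented, and simple lambdas stand in for the real tokenizers.

class CompositeTokenizer:
    """Two sub-tokenizers, with __call__ routed to whichever is current."""

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def as_target_tokenizer(self):
        self.current_tokenizer = self.generator

    def as_input_tokenizer(self):
        self.current_tokenizer = self.question_encoder


# toy sub-tokenizers: whitespace splitters with different casing
tok = CompositeTokenizer(lambda s: s.lower().split(), lambda s: s.upper().split())
assert tok("Hello World") == ["hello", "world"]
tok.as_target_tokenizer()
assert tok("Hello World") == ["HELLO", "WORLD"]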
16
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # Saving a PIL image writes it to a new temporary file, so the paths differ
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
40
0
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
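To make clean_doc_toc's behavior concrete, a small illustrative call with hypothetical entries: duplicates that share a single title collapse to one entry, and the "Overview" page floats to the front.

# Hypothetical input for the clean_doc_toc function defined above.
docs = [
    {"local": "model_b", "title": "Model B"},
    {"local": "overview", "title": "Overview"},
    {"local": "model_a", "title": "Model A"},
    {"local": "model_a", "title": "Model A"},
]
print(clean_doc_toc(docs))
# [{'local': 'overview', 'title': 'Overview'},
#  {'local': 'model_a', 'title': 'Model A'},
#  {'local': 'model_b', 'title': 'Model B'}]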
17
def kth_permutation(k, n):
    """
    Finds the k-th (0-indexed) lexicographic permutation of the integers
    0 .. n-1 using the factorial number system.
    """
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
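A short worked check of the factorial-number-system step above (assuming the kth_permutation function as fixed here): for n = 4 the factorials popped are 3!, 2!, 1!, and k = 10 decomposes as 10 = 1*3! + 2*2! + 0*1!.

# Worked checks for kth_permutation (n = 4, so 4! = 24 permutations, k in [0, 24)).
assert kth_permutation(0, 4) == [0, 1, 2, 3]    # k = 0 is the identity
assert kth_permutation(10, 4) == [1, 3, 0, 2]   # digits 1, 2, 0 pick from [0,1,2,3], then [0,2,3], then [0,2]
assert kth_permutation(23, 4) == [3, 2, 1, 0]   # k = 4! - 1 is the full reversal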
40
0
'''simple docstring''' import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = "▁" _SCREAMING_SNAKE_CASE = {"vocab_file": "prophetnet.tokenizer"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } _SCREAMING_SNAKE_CASE = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } _SCREAMING_SNAKE_CASE = { "microsoft/xprophetnet-large-wiki100-cased": 5_12, } def __a(SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' _lowerCAmelCase = collections.OrderedDict() with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" ) as reader: _lowerCAmelCase = reader.readlines() for index, token in enumerate(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = token.rstrip("\n" ) _lowerCAmelCase = index return vocab class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES __lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : str = ["input_ids", "attention_mask"] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) _lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab _lowerCAmelCase = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10 ): _lowerCAmelCase = f'''[unused{i}]''' _lowerCAmelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab _lowerCAmelCase = 12 _lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(_lowerCAmelCase ) def __getstate__( self ) -> Dict: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self , _lowerCAmelCase ) -> str: _lowerCAmelCase = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + [1] return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self ) -> List[str]: return len(self.sp_model ) + self.fairseq_offset def _snake_case ( self ) -> int: _lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , _lowerCAmelCase ) -> str: return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowerCAmelCase = self.sp_model.PieceToId(_lowerCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , _lowerCAmelCase ) -> Optional[int]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip() return out_string def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , "wb" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() 
fi.write(_lowerCAmelCase ) return (out_vocab_file,) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] _lowerCAmelCase = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
18
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase_ ( a__ ): def snake_case_ ( self ) -> Tuple: UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) ) class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any: UpperCamelCase : int = parent UpperCamelCase : int = batch_size UpperCamelCase : List[Any] = image_size UpperCamelCase : List[str] = patch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 ) UpperCamelCase : List[str] = hidden_act UpperCamelCase : Optional[int] = conv_kernel_size UpperCamelCase : List[str] = output_stride UpperCamelCase : Union[str, Any] = classifier_dropout_prob UpperCamelCase : List[Any] = use_labels UpperCamelCase : Any = is_training UpperCamelCase : int = num_labels UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Tuple = scope UpperCamelCase : List[str] = width_multiplier UpperCamelCase : Any = ffn_dropout UpperCamelCase : List[Any] = attn_dropout def snake_case_ ( self ) -> int: UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[str] = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels ) UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCamelCase : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def snake_case_ ( self ) -> int: return MobileViTVaConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Optional[int] = self.num_labels UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Any = self.num_labels UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : Tuple = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase__ : Any = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Dict = MobileViTVaModelTester(self ) UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def snake_case_ ( self ) -> int: pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def snake_case_ ( self ) -> str: pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> List[str]: UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : str = [*signature.parameters.keys()] UpperCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Tuple: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Tuple = outputs.hidden_states UpperCamelCase : Dict = 5 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCamelCase : Any = 2 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : Optional[int] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> str: UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) @slow def snake_case_ ( self ) -> Optional[Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Tuple: UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def snake_case_ ( self ) -> str: return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : 
Any = prepare_img() UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Union[str, Any] = prepare_img() UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ], device=SCREAMING_SNAKE_CASE_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Tuple = prepare_img() UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = outputs.logits.detach().cpu() UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] ) UpperCamelCase : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
40
0
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets _a = datasets.logging.get_logger(__name__) _a = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ _a = """\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information. """ _a = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: 'scores': List of scores. Examples: >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> bleurt = datasets.load_metric(\"bleurt\") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results[\"scores\"]]) [1.03, 1.04] """ _a = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase( datasets.Metric ): def UpperCAmelCase ( self) -> str: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , ) def UpperCAmelCase ( self , __a) -> Union[str, Any]: '''simple docstring''' # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( '''Using default BLEURT-Base checkpoint for sequence maximum length 128. 
''' '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''') _UpperCamelCase = '''bleurt-base-128''' if self.config_name.lower() in CHECKPOINT_URLS: _UpperCamelCase = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: _UpperCamelCase = self.config_name.upper() else: raise KeyError( F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''') # download the model checkpoint specified by self.config_name and set up the scorer _UpperCamelCase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name]) _UpperCamelCase = score.BleurtScorer(os.path.join(__a , __a)) def UpperCAmelCase ( self , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = self.scorer.score(references=__a , candidates=__a) return {"scores": scores}
19
def longest_distance(graph):
    """Prints the length, counted in vertices, of the longest path in a DAG given as an adjacency list."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
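A quick sanity check of the Kahn-style pass above. The distance counts vertices (every node starts at 1), so for the sample graph the chain 0 -> 2 -> 5 -> 6 -> 7 makes the script print 5.

# Minimal extra check for longest_distance: a three-node chain has distance 3.
chain = {0: [1], 1: [2], 2: []}
longest_distance(chain)  # prints 3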
40
0
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase_ (unittest.TestCase ): def __UpperCamelCase ( self) -> Dict: a__ ={ 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } a__ ={ 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(lowercase_) , lowercase_) def __UpperCamelCase ( self) -> Any: a__ =np.random.randn(3 , 4) self.assertTrue(np.allclose(transpose(lowercase_) , x.transpose())) a__ =np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , x.transpose((1, 2, 0)))) @require_torch def __UpperCamelCase ( self) -> List[str]: a__ =np.random.randn(3 , 4) a__ =torch.tensor(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_) , transpose(lowercase_).numpy())) a__ =np.random.randn(3 , 4 , 5) a__ =torch.tensor(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , transpose(lowercase_ , axes=(1, 2, 0)).numpy())) @require_tf def __UpperCamelCase ( self) -> List[str]: a__ =np.random.randn(3 , 4) a__ =tf.constant(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_) , transpose(lowercase_).numpy())) a__ =np.random.randn(3 , 4 , 5) a__ =tf.constant(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , transpose(lowercase_ , axes=(1, 2, 0)).numpy())) @require_flax def __UpperCamelCase ( self) -> Optional[int]: a__ =np.random.randn(3 , 4) a__ =jnp.array(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_) , np.asarray(transpose(lowercase_)))) a__ =np.random.randn(3 , 4 , 5) a__ =jnp.array(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , np.asarray(transpose(lowercase_ , axes=(1, 2, 0))))) def __UpperCamelCase ( self) -> List[str]: a__ =np.random.randn(3 , 4) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , np.reshape(lowercase_ , (4, 3)))) a__ =np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , np.reshape(lowercase_ , (12, 5)))) @require_torch def __UpperCamelCase ( self) -> int: a__ =np.random.randn(3 , 4) a__ =torch.tensor(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , reshape(lowercase_ , (4, 3)).numpy())) a__ =np.random.randn(3 , 4 , 5) a__ =torch.tensor(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ 
, (12, 5)) , reshape(lowercase_ , (12, 5)).numpy())) @require_tf def __UpperCamelCase ( self) -> Dict: a__ =np.random.randn(3 , 4) a__ =tf.constant(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , reshape(lowercase_ , (4, 3)).numpy())) a__ =np.random.randn(3 , 4 , 5) a__ =tf.constant(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , reshape(lowercase_ , (12, 5)).numpy())) @require_flax def __UpperCamelCase ( self) -> Any: a__ =np.random.randn(3 , 4) a__ =jnp.array(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , np.asarray(reshape(lowercase_ , (4, 3))))) a__ =np.random.randn(3 , 4 , 5) a__ =jnp.array(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , np.asarray(reshape(lowercase_ , (12, 5))))) def __UpperCamelCase ( self) -> Tuple: a__ =np.random.randn(1 , 3 , 4) self.assertTrue(np.allclose(squeeze(lowercase_) , np.squeeze(lowercase_))) a__ =np.random.randn(1 , 4 , 1 , 5) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , np.squeeze(lowercase_ , axis=2))) @require_torch def __UpperCamelCase ( self) -> Dict: a__ =np.random.randn(1 , 3 , 4) a__ =torch.tensor(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_) , squeeze(lowercase_).numpy())) a__ =np.random.randn(1 , 4 , 1 , 5) a__ =torch.tensor(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , squeeze(lowercase_ , axis=2).numpy())) @require_tf def __UpperCamelCase ( self) -> List[Any]: a__ =np.random.randn(1 , 3 , 4) a__ =tf.constant(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_) , squeeze(lowercase_).numpy())) a__ =np.random.randn(1 , 4 , 1 , 5) a__ =tf.constant(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , squeeze(lowercase_ , axis=2).numpy())) @require_flax def __UpperCamelCase ( self) -> int: a__ =np.random.randn(1 , 3 , 4) a__ =jnp.array(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_) , np.asarray(squeeze(lowercase_)))) a__ =np.random.randn(1 , 4 , 1 , 5) a__ =jnp.array(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , np.asarray(squeeze(lowercase_ , axis=2)))) def __UpperCamelCase ( self) -> Optional[Any]: a__ =np.random.randn(3 , 4) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , np.expand_dims(lowercase_ , axis=1))) @require_torch def __UpperCamelCase ( self) -> Union[str, Any]: a__ =np.random.randn(3 , 4) a__ =torch.tensor(lowercase_) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , expand_dims(lowercase_ , axis=1).numpy())) @require_tf def __UpperCamelCase ( self) -> Tuple: a__ =np.random.randn(3 , 4) a__ =tf.constant(lowercase_) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , expand_dims(lowercase_ , axis=1).numpy())) @require_flax def __UpperCamelCase ( self) -> Optional[Any]: a__ =np.random.randn(3 , 4) a__ =jnp.array(lowercase_) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , np.asarray(expand_dims(lowercase_ , axis=1))))
20
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
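The _LazyModule wiring above defers the torch-dependent imports until one of the exported names is first accessed. A simplified sketch of that idea (an illustration only, not the real transformers implementation):

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    # Illustration: map each exported name to its defining submodule and
    # import that submodule lazily on first attribute access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr in self._class_to_module:
            module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")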
40
0
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) class __A ( UpperCamelCase__ ): UpperCamelCase = ["""pixel_values"""] def __init__( self :Optional[int] , __snake_case :bool = True , __snake_case :Union[int, float] = 1 / 2_55 , __snake_case :bool = True , __snake_case :int = 8 , **__snake_case :int , ): '''simple docstring''' super().__init__(**__snake_case ) __magic_name__ : Optional[Any] =do_rescale __magic_name__ : List[Any] =rescale_factor __magic_name__ : Dict =do_pad __magic_name__ : Tuple =pad_size def A__ ( self :List[str] , __snake_case :np.ndarray , __snake_case :float , __snake_case :Optional[Union[str, ChannelDimension]] = None , **__snake_case :Tuple ): '''simple docstring''' return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def A__ ( self :List[Any] , __snake_case :np.ndarray , __snake_case :int , __snake_case :Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' __magic_name__ , __magic_name__ : Optional[int] =get_image_size(__snake_case ) __magic_name__ : List[Any] =(old_height // size + 1) * size - old_height __magic_name__ : Union[str, Any] =(old_width // size + 1) * size - old_width return pad(__snake_case , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=__snake_case ) def A__ ( self :Union[str, Any] , __snake_case :ImageInput , __snake_case :Optional[bool] = None , __snake_case :Optional[float] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[int] = None , __snake_case :Optional[Union[str, TensorType]] = None , __snake_case :Union[str, ChannelDimension] = ChannelDimension.FIRST , **__snake_case :Tuple , ): '''simple docstring''' __magic_name__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : str =rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : Dict =do_pad if do_pad is not None else self.do_pad __magic_name__ : Union[str, Any] =pad_size if pad_size is not None else self.pad_size __magic_name__ : int =make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __magic_name__ : Optional[int] =[to_numpy_array(__snake_case ) for image in images] if do_rescale: __magic_name__ : Any =[self.rescale(image=__snake_case , scale=__snake_case ) for image in images] if do_pad: __magic_name__ : Optional[Any] =[self.pad(__snake_case , size=__snake_case ) for image in images] __magic_name__ : Any =[to_channel_dimension_format(__snake_case , __snake_case ) for image in images] __magic_name__ : Dict ={"""pixel_values""": images} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
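A quick numeric check of the pad-size arithmetic in the pad method above: (old // size + 1) * size always adds at least one block, so a dimension that is already a multiple of size still grows by a full size.

# Standalone check of the symmetric-pad size formula (size = 8).
size = 8
for old in (13, 16, 17):
    pad = (old // size + 1) * size - old
    print(old, "->", old + pad)  # 13 -> 16, 16 -> 24, 17 -> 24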
21
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
0
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _snake_case : List[str] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class A ( _a ,unittest.TestCase ): lowercase_ = XLMProphetNetTokenizer lowercase_ = False lowercase_ = True def __lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _a = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" _a = '''[PAD]''' _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" _a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''[PAD]''' ) self.assertEqual(vocab_keys[1] , '''[CLS]''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(lowerCAmelCase_ ) , 10_12 ) def __lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_12 ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" _a = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) _a = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) _a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _a = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) _a = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''[UNK]''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''[UNK]''', '''.''', ] , ) @cached_property def __lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' ) @slow def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" _a = '''Hello World!''' _a = [3_53_89, 66_72, 49, 2] 
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) ) @slow def __lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" _a = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
22
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
0
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") snake_case__ : List[str] = logging.getLogger(__name__) @dataclass class _a : """simple docstring""" A_ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) A_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class _a : """simple docstring""" A_ = field(default=UpperCAmelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """ """efficient on GPU but very bad for TPU.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def _UpperCAmelCase ( self ) -> Union[str, Any]: if self.train_file is not None: UpperCamelCase_ = self.train_file.split('.' 
)[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: UpperCamelCase_ = self.validation_file.split('.' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _a : """simple docstring""" A_ = 42 A_ = True A_ = None A_ = None def __call__( self , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = 'label' if 'label' in features[0].keys() else 'labels' UpperCamelCase_ = [feature.pop(_UpperCAmelCase ) for feature in features] UpperCamelCase_ = len(_UpperCAmelCase ) UpperCamelCase_ = len(features[0]['input_ids'] ) UpperCamelCase_ = [ [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features ] UpperCamelCase_ = list(chain(*_UpperCAmelCase ) ) UpperCamelCase_ = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten UpperCamelCase_ = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()} # Add back labels UpperCamelCase_ = torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) return batch def _snake_case (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , __lowercase , __lowercase) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase_ = training_args.get_process_log_level() logger.setLevel(__lowercase) datasets.utils.logging.set_verbosity(__lowercase) transformers.utils.logging.set_verbosity(__lowercase) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""") logger.info(f"""Training/evaluation parameters {training_args}""") # Detecting last checkpoint. 
UpperCamelCase_ = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase_ = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.') elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: UpperCamelCase_ = {} if data_args.train_file is not None: UpperCamelCase_ = data_args.train_file if data_args.validation_file is not None: UpperCamelCase_ = data_args.validation_file UpperCamelCase_ = data_args.train_file.split('.')[-1] UpperCamelCase_ = load_dataset( __lowercase , data_files=__lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. UpperCamelCase_ = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
UpperCamelCase_ = [f"""ending{i}""" for i in range(4)] UpperCamelCase_ = 'sent1' UpperCamelCase_ = 'sent2' if data_args.max_seq_length is None: UpperCamelCase_ = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.') UpperCamelCase_ = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""") UpperCamelCase_ = min(data_args.max_seq_length , tokenizer.model_max_length) # Preprocessing the datasets. def preprocess_function(__lowercase): UpperCamelCase_ = [[context] * 4 for context in examples[context_name]] UpperCamelCase_ = examples[question_header_name] UpperCamelCase_ = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__lowercase) ] # Flatten out UpperCamelCase_ = list(chain(*__lowercase)) UpperCamelCase_ = list(chain(*__lowercase)) # Tokenize UpperCamelCase_ = tokenizer( __lowercase , __lowercase , truncation=__lowercase , max_length=__lowercase , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__lowercase) , 4)] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset') UpperCamelCase_ = raw_datasets['train'] if data_args.max_train_samples is not None: UpperCamelCase_ = min(len(__lowercase) , data_args.max_train_samples) UpperCamelCase_ = train_dataset.select(range(__lowercase)) with training_args.main_process_first(desc='train dataset map pre-processing'): UpperCamelCase_ = train_dataset.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset') UpperCamelCase_ = raw_datasets['validation'] if data_args.max_eval_samples is not None: UpperCamelCase_ = min(len(__lowercase) , data_args.max_eval_samples) UpperCamelCase_ = eval_dataset.select(range(__lowercase)) with training_args.main_process_first(desc='validation dataset map pre-processing'): UpperCamelCase_ = eval_dataset.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator UpperCamelCase_ = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__lowercase , pad_to_multiple_of=8 if training_args.fpaa else None) ) # Metric def compute_metrics(__lowercase): UpperCamelCase_ , UpperCamelCase_ = eval_predictions UpperCamelCase_ = np.argmax(__lowercase , axis=1) return {"accuracy": (preds == label_ids).astype(np.floataa).mean().item()} # Initialize our Trainer UpperCamelCase_ = Trainer( model=__lowercase , args=__lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , compute_metrics=__lowercase , ) # Training if training_args.do_train: UpperCamelCase_ = 
None if training_args.resume_from_checkpoint is not None: UpperCamelCase_ = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase_ = last_checkpoint UpperCamelCase_ = trainer.train(resume_from_checkpoint=__lowercase) trainer.save_model() # Saves the tokenizer too for easy upload UpperCamelCase_ = train_result.metrics UpperCamelCase_ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowercase) ) UpperCamelCase_ = min(__lowercase , len(__lowercase)) trainer.log_metrics('train' , __lowercase) trainer.save_metrics('train' , __lowercase) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***') UpperCamelCase_ = trainer.evaluate() UpperCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowercase) UpperCamelCase_ = min(__lowercase , len(__lowercase)) trainer.log_metrics('eval' , __lowercase) trainer.save_metrics('eval' , __lowercase) UpperCamelCase_ = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase) else: trainer.create_model_card(**__lowercase) def _snake_case (__lowercase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
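Since all three dataclasses are parsed by `HfArgumentParser`, the script is driven from the command line (or from a single JSON file, per the `sys.argv` check above). A typical invocation sketch; the model name and hyperparameter values are illustrative, not mandated by the script:

python run_swag.py \
    --model_name_or_path bert-base-uncased \
    --do_train \
    --do_eval \
    --max_seq_length 128 \
    --per_device_train_batch_size 16 \
    --learning_rate 5e-5 \
    --num_train_epochs 3 \
    --output_dir /tmp/swag_out \
    --overwrite_output_dir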
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
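A quick sketch of the torch-side exports registered above, assuming `torch` and `Pillow` are installed; `google/vit-base-patch16-224` is a public checkpoint chosen for illustration, and the zero image is a stand-in for a real photo:

import numpy as np
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # stand-in for a real photo
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])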
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCAmelCase_ : Tuple = '''\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } ''' UpperCAmelCase_ : Optional[Any] = '''\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. ''' UpperCAmelCase_ : int = ''' Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for \'cvit-mkb-clsr\' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "precision": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'precision@10\': 1.0} ''' def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] )-> str: '''simple docstring''' return float((preds == labels).mean() ) def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : List[str] )-> Tuple: '''simple docstring''' __snake_case = simple_accuracy(_lowerCamelCase , _lowerCamelCase ) __snake_case = float(fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] )-> str: '''simple docstring''' __snake_case = np.array(_lowerCamelCase ) __snake_case = np.array(_lowerCamelCase ) __snake_case = en_sentvecs.shape[0] # mean centering __snake_case = en_sentvecs - np.mean(_lowerCamelCase , axis=0 ) __snake_case = in_sentvecs - np.mean(_lowerCamelCase , axis=0 ) __snake_case = cdist(_lowerCamelCase , _lowerCamelCase , '''cosine''' ) __snake_case = np.array(range(_lowerCamelCase ) ) __snake_case = sim.argsort(axis=1 )[:, :10] __snake_case = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCAmelCase ( datasets.Metric): def lowerCAmelCase ( self ) -> str: '''simple docstring''' if self.config_name not in [ 
"wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' )
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __UpperCAmelCase = random.Random() def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any: if rng is None: UpperCamelCase : int = global_rng UpperCamelCase : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]: UpperCamelCase : Dict = parent UpperCamelCase : Dict = batch_size UpperCamelCase : Any = min_seq_length UpperCamelCase : Optional[int] = max_seq_length UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Tuple = feature_size UpperCamelCase : Any = padding_value UpperCamelCase : Tuple = sampling_rate UpperCamelCase : Optional[Any] = return_attention_mask UpperCamelCase : Optional[Any] = do_normalize def snake_case_ ( self ) -> Union[str, Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]: def _flatten(SCREAMING_SNAKE_CASE_ ): return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Union[str, Any] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] return speech_inputs class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Any = WavaVecaFeatureExtractor def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) ) def snake_case_ ( self ) -> Optional[int]: # Tests that all call wrap to encode_plus and batch_encode_plus UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, 
SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test batched UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : Any = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Tuple: UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Tuple = range(800, 1400, 200 ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths] UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : List[str] = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : int = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 
200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' ) UpperCamelCase : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' ) UpperCamelCase : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def snake_case_ ( self ) -> str: import torch UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : Dict = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def snake_case_ ( self ) -> Tuple: # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
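The `_check_zero_mean_unit_variance` assertions above boil down to per-utterance standardization over the unpadded samples. A minimal numpy sketch of the same normalization; the epsilon value is illustrative, not taken from the feature extractor:

import numpy as np

def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # Standardize a 1-D waveform so checks like the ones above pass:
    # mean ~ 0 and variance ~ 1 over the valid samples.
    return (x - x.mean()) / np.sqrt(x.var() + eps)

wave = np.random.rand(800).astype(np.float32)
norm = zero_mean_unit_var(wave)
assert abs(norm.mean()) < 1e-3 and abs(norm.var() - 1) < 1e-3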
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , a : Any , a : str=13 , a : Union[str, Any]=7 , a : int=True , a : Optional[Any]=True , a : Optional[int]=True , a : int=True , a : List[Any]=99 , a : List[Any]=16 , a : str=36 , a : Dict=6 , a : str=6 , a : List[str]=6 , a : Any=37 , a : Optional[Any]="gelu" , a : List[str]=0.1 , a : Tuple=0.1 , a : int=512 , a : Optional[Any]=16 , a : List[str]=2 , a : List[str]=0.02 , a : int=3 , a : int=4 , a : Union[str, Any]=None , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : List[str] = batch_size SCREAMING_SNAKE_CASE : int = seq_length SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask SCREAMING_SNAKE_CASE : str = use_token_type_ids SCREAMING_SNAKE_CASE : int = use_labels SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = embedding_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_hidden_groups SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE : str = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = num_labels SCREAMING_SNAKE_CASE : int = num_choices SCREAMING_SNAKE_CASE : str = scope def __UpperCamelCase ( self : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : List[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def 
__UpperCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __UpperCamelCase ( self : Union[str, Any] , a : Optional[int] , a : Optional[Any] , a : int , a : Dict , a : int , a : Union[str, Any] , a : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = AlbertModel(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Any = model(a , attention_mask=a , token_type_ids=a ) SCREAMING_SNAKE_CASE : Optional[int] = model(a , token_type_ids=a ) SCREAMING_SNAKE_CASE : Dict = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase ( self : str , a : int , a : Optional[int] , a : Optional[Any] , a : Dict , a : str , a : int , a : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : int = AlbertForPreTraining(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Union[str, Any] = model( a , attention_mask=a , token_type_ids=a , labels=a , sentence_order_label=a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __UpperCamelCase ( self : Any , a : Any , a : Any , a : Optional[int] , a : str , a : int , a : str , a : Any ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : str = AlbertForMaskedLM(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Dict = model(a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : str , a : Tuple , a : Optional[Any] , a : Union[str, Any] , a : Optional[int] , a : Optional[int] , a : List[Any] , a : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = AlbertForQuestionAnswering(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model( a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : List[Any] , a : Tuple , a : Tuple , a : int , a : List[Any] , a : List[Any] , a : Dict , a : Any ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.num_labels SCREAMING_SNAKE_CASE : Optional[int] = AlbertForSequenceClassification(a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : int = model(a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : List[str] , a : List[str] , a : str , a : Optional[int] , a : Optional[int] , a : str , a : List[Any] , a : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : str = 
self.num_labels SCREAMING_SNAKE_CASE : Union[str, Any] = AlbertForTokenClassification(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : str , a : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Union[str, Any] , a : List[Any] , a : str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.num_choices SCREAMING_SNAKE_CASE : Any = AlbertForMultipleChoice(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE : List[str] = model( a , attention_mask=a , token_type_ids=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : int ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) ,( SCREAMING_SNAKE_CASE ) ,( SCREAMING_SNAKE_CASE ) ,( SCREAMING_SNAKE_CASE ) ,( SCREAMING_SNAKE_CASE ) ,( SCREAMING_SNAKE_CASE ) ,( SCREAMING_SNAKE_CASE ) , ) : int = config_and_inputs SCREAMING_SNAKE_CASE : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _UpperCamelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True def __UpperCamelCase ( self : List[str] , a : int , a : List[str] , a : Optional[int]=False ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class in get_values(a ): SCREAMING_SNAKE_CASE : int = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = AlbertModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=a , hidden_size=37 ) def __UpperCamelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a ) def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a ) def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a ) def __UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a ) def __UpperCamelCase ( self : Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a ) def __UpperCamelCase ( self : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE : Any = type self.model_tester.create_and_check_model(*a ) @slow def __UpperCamelCase ( self : Tuple ) -> str: """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[Any] = AlbertModel.from_pretrained(a ) self.assertIsNotNone(a ) @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCamelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = AlbertModel.from_pretrained("albert-base-v2" ) SCREAMING_SNAKE_CASE : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(a , attention_mask=a )[0] SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
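The slow integration test above is easy to replay outside unittest; a sketch assuming `torch` and the public `albert-base-v2` checkpoint, using the same token ids as the test:

import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    hidden = model(input_ids, attention_mask=attention_mask)[0]
print(hidden.shape)  # torch.Size([1, 11, 768])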
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation with a "0b" prefix.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(10)
    '0b1010'
    >>> decimal_to_binary(-5)
    '-0b101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)  # prepend the lowest remaining bit
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
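A few worked calls, checked against Python's built-in `bin`, make the sign handling concrete:

print(decimal_to_binary(10))   # 0b1010  (8 + 2)
print(decimal_to_binary(-5))   # -0b101  (sign tracked separately; magnitude 5 -> 101)
print(bin(10), bin(-5))        # 0b1010 -0b101 -- the built-in agrees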
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
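Outside the unittest harness the pipeline is driven the same way; a sketch assuming `torch` and network access to the `laion/clap-htsat-unfused` checkpoint exercised above:

from datasets import load_dataset
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
for pred in classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"]):
    print(pred["label"], round(pred["score"], 3))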
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __UpperCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: UpperCamelCase : List[Any] = tensor_name.split('.' ) for split in splits[:-1]: UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) UpperCamelCase : Dict = new_module UpperCamelCase : int = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) UpperCamelCase : Union[str, Any] = tensor_name in module._buffers UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) UpperCamelCase : Optional[Any] = False UpperCamelCase : str = False if is_buffer or not is_bitsandbytes_available(): UpperCamelCase : List[str] = False UpperCamelCase : Tuple = False else: UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCamelCase : List[Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCamelCase : Dict = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[Any] = value.to('cpu' ) if value.dtype == torch.inta: UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: UpperCamelCase : Union[str, Any] = new_value.T UpperCamelCase : Union[str, Any] = old_value.__dict__ if is_abit: UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) UpperCamelCase : Dict = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[str] = value.to(snake_case__ ) else: UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: UpperCamelCase : Optional[int] = new_value else: UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) UpperCamelCase : List[str] = new_value def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int: for name, module in model.named_children(): if current_key_name is None: UpperCamelCase : str = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape else: UpperCamelCase : Any = module.in_features UpperCamelCase : List[str] = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCamelCase : Any = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCamelCase : Optional[int] = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCamelCase : str = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCamelCase : int = True # Store the module class in case we need to transpose the weight later UpperCamelCase : Any = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]: UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , 
snake_case__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]: UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCamelCase : List[str] = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] ) UpperCamelCase : Optional[int] = len(snake_case__ ) > 0 # Check if it is a base model UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCamelCase : List[Any] = list(model.named_children() ) UpperCamelCase : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ ) UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys UpperCamelCase : Tuple = ['.weight', '.bias'] UpperCamelCase : Tuple = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
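These helpers are what `from_pretrained` calls into on the quantized loading path. A minimal sketch of triggering them on the transformers 4.x API this file belongs to, assuming a CUDA device with `bitsandbytes` and `accelerate` installed; the OPT checkpoint is illustrative:

from transformers import AutoModelForCausalLM

# load_in_8bit routes every eligible nn.Linear through the replace_with_bnb_linear
# pass above; lm_head stays in full precision (the default modules_to_not_convert).
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, device_map="auto")
print(type(model.model.decoder.layers[0].fc1))  # a bitsandbytes Linear8bitLt module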
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a strided convolution."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group over the channel dim of a [B, C, H, W] tensor."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # The subtraction makes the pooling act as a residual "token mixer"
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
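# A quick standalone check of the stochastic-depth rescaling in `drop_path` above:
# zeroing each sample with probability `drop_prob` and dividing the survivors by
# `keep_prob` preserves the activation mean in expectation, which is why no
# correction is needed at inference time. A minimal sketch, independent of the
# PoolFormer classes:
import torch

torch.manual_seed(0)
x = torch.ones(10_000, 1)
keep_prob = 0.5  # i.e. drop_prob = 0.5
mask = torch.rand(x.shape[0], 1).add_(keep_prob).floor_()  # Bernoulli(keep_prob)
out = x.div(keep_prob) * mask
print(out.mean().item())  # close to 1.0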
code_codestyle: 27
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
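# The `converters` pattern exercised by the last test is plain pandas `read_csv`
# machinery (the Csv builder forwards these kwargs): each converter maps a raw
# cell string to a Python value before any Arrow typing. A self-contained sketch,
# reusing the `int_list` column shape from the fixture above:
import textwrap
from io import StringIO

import pandas as pd

csv_text = textwrap.dedent(
    """\
    int_list
    1 2 3
    4 5 6
    """
)
df = pd.read_csv(StringIO(csv_text), converters={"int_list": lambda x: [int(i) for i in x.split()]})
print(df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6]]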
style_context_codestyle: 40
label: 0
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
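# The PyTorch branch of `postprocess` above is just a softmax followed by `topk`.
# A minimal sketch with invented logits and a hypothetical `id2label` map:
import torch

logits = torch.tensor([[2.0, 0.5, -1.0]])  # one image, three classes
id2label = {0: "cat", 1: "dog", 2: "fish"}  # hypothetical label map

probs = logits.softmax(-1)[0]
scores, ids = probs.topk(2)
print([{"score": s.item(), "label": id2label[i.item()]} for s, i in zip(scores, ids)])
# [{'score': 0.78..., 'label': 'cat'}, {'score': 0.17..., 'label': 'dog'}]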
code_codestyle: 28
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after training a one-weight network by forward propagation."""
    # Random weight in [1, 199]
    weight = float(2 * random.randint(1, 100) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
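# Usage sketch, assuming the cleaned-up names above: the single weight is nudged
# toward the target on every propagation, so with enough steps the output should
# land close to the expected value (exact figures depend on the random seed).
import random

random.seed(0)
print(forward_propagation(32, 450_000))  # approximately 32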
style_context_codestyle: 40
label: 0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): a__: str = AltDiffusionPipeline a__: List[str] = TEXT_TO_IMAGE_PARAMS a__: Any = TEXT_TO_IMAGE_BATCH_PARAMS a__: int = TEXT_TO_IMAGE_IMAGE_PARAMS a__: Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self ): torch.manual_seed(0 ) lowerCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowerCamelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , ) torch.manual_seed(0 ) lowerCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , ) lowerCamelCase_ = CLIPTextModel(UpperCAmelCase ) lowerCamelCase_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCamelCase_ = 77 lowerCamelCase_ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=0 ): if str(UpperCAmelCase ).startswith('''mps''' ): lowerCamelCase_ = torch.manual_seed(UpperCAmelCase ) else: lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) lowerCamelCase_ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def UpperCAmelCase__ ( self ): 
super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = self.get_dummy_components() torch.manual_seed(0 ) lowerCamelCase_ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCamelCase_ = RobertaSeriesModelWithTransformation(UpperCAmelCase ) lowerCamelCase_ = text_encoder lowerCamelCase_ = AltDiffusionPipeline(**UpperCAmelCase ) lowerCamelCase_ = alt_pipe.to(UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase ) lowerCamelCase_ = '''A photo of an astronaut''' lowerCamelCase_ = alt_pipe(**UpperCAmelCase ) lowerCamelCase_ = output.images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase_ = np.array( [0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self ): lowerCamelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase ) torch.manual_seed(0 ) lowerCamelCase_ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCamelCase_ = RobertaSeriesModelWithTransformation(UpperCAmelCase ) lowerCamelCase_ = text_encoder lowerCamelCase_ = AltDiffusionPipeline(**UpperCAmelCase ) lowerCamelCase_ = alt_pipe.to(UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase ) lowerCamelCase_ = alt_pipe(**UpperCAmelCase ) lowerCamelCase_ = output.images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase_ = np.array( [0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def UpperCAmelCase__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self ): # make sure here that pndm scheduler skips prk lowerCamelCase_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=UpperCAmelCase ) lowerCamelCase_ = alt_pipe.to(UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCamelCase_ = '''A painting of a squirrel eating a burger''' lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = alt_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) lowerCamelCase_ = output.images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self ): lowerCamelCase_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) lowerCamelCase_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase ) lowerCamelCase_ = alt_pipe.to(UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCamelCase_ = '''A painting of a squirrel eating a burger''' lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = alt_pipe([prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type='''numpy''' ) lowerCamelCase_ = output.images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
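# The `get_dummy_inputs` helper above encodes a real gotcha: per-device
# `torch.Generator` objects are not supported on `mps`, so seeding falls back to
# the global `torch.manual_seed` there. A minimal sketch of the same dispatch:
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds the global RNG; returns the default Generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))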
code_codestyle: 29
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
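# `format_dict` is the only pure function here; its output is the bullet list that
# users paste into GitHub issues. A tiny check with placeholder values:
info = {"`diffusers` version": "0.0.0 (placeholder)", "Platform": "Linux (placeholder)"}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()) + "\n")
# - `diffusers` version: 0.0.0 (placeholder)
# - Platform: Linux (placeholder)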
style_context_codestyle: 40
label: 0
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __a = logging.get_logger(__name__) __a = { 'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __a( _a ): """simple docstring""" lowerCAmelCase = '''perceiver''' def __init__( self ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=1_280 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=26 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="kv" ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-12 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=262 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=56 ,_SCREAMING_SNAKE_CASE=[368, 496] ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=1_920 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=[1, 16, 224, 224] ,**_SCREAMING_SNAKE_CASE ,) -> int: super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = num_latents UpperCAmelCase_ : Any = d_latents UpperCAmelCase_ : List[str] = d_model UpperCAmelCase_ : int = num_blocks UpperCAmelCase_ : str = num_self_attends_per_block UpperCAmelCase_ : Any = num_self_attention_heads UpperCAmelCase_ : List[str] = num_cross_attention_heads UpperCAmelCase_ : List[Any] = qk_channels UpperCAmelCase_ : Union[str, Any] = v_channels UpperCAmelCase_ : Optional[Any] = cross_attention_shape_for_attention UpperCAmelCase_ : List[str] = self_attention_widening_factor UpperCAmelCase_ : Optional[Any] = cross_attention_widening_factor UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : int = use_query_residual # masked language modeling attributes UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : Tuple = max_position_embeddings # image classification attributes UpperCAmelCase_ : Tuple = image_size # flow attributes UpperCAmelCase_ : int = train_size # multimodal autoencoding attributes UpperCAmelCase_ : int = num_frames UpperCAmelCase_ : Dict = audio_samples_per_frame UpperCAmelCase_ : Dict = samples_per_patch UpperCAmelCase_ : str = output_shape class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": UpperCAmelCase_ : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase_ : Tuple = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def a__ ( self ) -> float: return 1e-4 def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 40 ,_SCREAMING_SNAKE_CASE = 40 ,) -> Mapping[str, Any]: # copied from 
`transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : Dict = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ : str = preprocessor.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_SCREAMING_SNAKE_CASE ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ : Union[str, Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size UpperCAmelCase_ : Optional[int] = dict(preprocessor(_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : List[str] = inputs.pop('''input_ids''' ) return inputs elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : int = compute_effective_axis_dimension(_SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_batch ) UpperCAmelCase_ : Optional[int] = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : str = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
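# The two `compute_effective_axis_dimension` calls above implement one simple rule:
# a dynamic axis (-1) is replaced by a small fixed size so ONNX export cannot
# specialize on it, and any tokens the tokenizer will add are subtracted from the
# sequence budget. A hedged reimplementation of that rule (my own helper, not the
# transformers function):
def effective_axis(dim: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dim <= 0:  # dynamic axis: fall back to the fixed default
        dim = fixed_dimension
    return dim - num_token_to_add

print(effective_axis(-1, fixed_dimension=2))                      # batch  -> 2
print(effective_axis(-1, fixed_dimension=8, num_token_to_add=2))  # tokens -> 6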
code_codestyle: 30
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = '''▁''' __UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } __UpperCAmelCase = { '''facebook/xglm-564M''': 2_048, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None: UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer UpperCamelCase : Any = 7 UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase : int = 1 # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} UpperCamelCase : Optional[int] = len(self.sp_model ) UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: UpperCamelCase : int = self.__dict__.copy() UpperCamelCase : Union[str, Any] = None UpperCamelCase : int = self.sp_model.serialized_model_proto() return state def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str: UpperCamelCase : Any = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): UpperCamelCase : Any = {} UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a UpperCamelCase : Optional[int] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : str = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def snake_case_ ( self ) -> int: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def snake_case_ ( self ) -> int: UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip() return out_string def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) 
return UpperCamelCase : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi: UpperCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
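# The fairseq alignment above boils down to: four hand-mapped control tokens, then
# every SentencePiece id shifted by `fairseq_offset = 1`. A standalone sketch of
# the id conversion with a toy stand-in for the sp_model (no SentencePiece needed):
fairseq_offset = 1
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}  # toy vocab

def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 is <unk>; everything else is shifted past the fairseq specials
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

print(token_to_id(","))    # 4, matching the fairseq column of the table above
print(token_to_id("???"))  # 3, the <unk> id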
style_context_codestyle: 40
label: 0
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch lowerCamelCase__ : Dict = random.Random() def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple=1.0 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Dict=None ) -> Tuple: if rng is None: SCREAMING_SNAKE_CASE_ = global_rng SCREAMING_SNAKE_CASE_ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Union[str, Any]=400 , _lowerCAmelCase : Tuple=2_000 , _lowerCAmelCase : str=1 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[Any]=16_000 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=80 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : List[Any]="hann_window" , _lowerCAmelCase : Any=80 , _lowerCAmelCase : List[Any]=7_600 , _lowerCAmelCase : List[Any]=1E-10 , _lowerCAmelCase : Optional[Any]=True , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = min_seq_length SCREAMING_SNAKE_CASE_ = max_seq_length SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE_ = feature_size SCREAMING_SNAKE_CASE_ = padding_value SCREAMING_SNAKE_CASE_ = sampling_rate SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = num_mel_bins SCREAMING_SNAKE_CASE_ = hop_length SCREAMING_SNAKE_CASE_ = win_length SCREAMING_SNAKE_CASE_ = win_function SCREAMING_SNAKE_CASE_ = fmin SCREAMING_SNAKE_CASE_ = fmax SCREAMING_SNAKE_CASE_ = mel_floor SCREAMING_SNAKE_CASE_ = return_attention_mask def lowerCAmelCase_ ( self : Union[str, Any] ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=False ): def _flatten(_lowerCAmelCase : Dict ): return list(itertools.chain(*_lowerCAmelCase ) ) if equal_length: SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE_ = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs] return speech_inputs def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ): if equal_length: SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE_ = [ floats_list((x, self.num_mel_bins) ) for x in 
range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = SpeechTaFeatureExtractor def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractionTester(self ) def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int ): self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) ) def lowerCAmelCase_ ( self : List[Any] ): # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) # Test batched SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE_ = [None, 1_600, None] for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' ) SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1_000] ) self.assertTrue(input_values[0][1_000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1_200] ) def lowerCAmelCase_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE_ = range(800 , 1_400 , 200 ) SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in lengths] SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE_ = [None, 1_600, None] for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1_000] ) self._check_zero_mean_unit_variance(input_values[2][:1_200] ) def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) 
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] SCREAMING_SNAKE_CASE_ = feat_extract( _lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='max_length' , return_tensors='np' ) SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] SCREAMING_SNAKE_CASE_ = feat_extract( _lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1_000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1_000) ) SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] SCREAMING_SNAKE_CASE_ = feat_extract( _lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2_000 , padding='longest' , return_tensors='np' ) SCREAMING_SNAKE_CASE_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1_000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1_200) ) def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE_ = np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def lowerCAmelCase_ ( self : Tuple ): # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) # Test batched SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE_ = 
feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) SCREAMING_SNAKE_CASE_ = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) SCREAMING_SNAKE_CASE_ = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack! 
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ = self.feat_extract_dict SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs] SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , _lowerCAmelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = self.feat_extract_dict SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs] SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE_ = feat_extract.pad( _lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' ) self.assertIn('attention_mask' , _lowerCAmelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple ): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE_ = ds.sort('id' ).select(range(_lowerCAmelCase ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowerCAmelCase_ ( self : Any ): # fmt: off SCREAMING_SNAKE_CASE_ = torch.tensor( [2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03, 3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03, 2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04, 4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03, 7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04, 4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] ) # fmt: on SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 93_680) ) self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1E-6 ) ) def lowerCAmelCase_ ( self : Optional[int] ): # fmt: off SCREAMING_SNAKE_CASE_ = torch.tensor( [-2.6870, -3.0104, -3.1356, 
-3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) )
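# `_check_zero_mean_unit_variance` asserts exactly the property that per-feature
# normalization produces. A quick numpy sketch of both the normalization and the
# check, independent of the feature extractor:
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=3.0, scale=2.0, size=(1_000, 4))

normalized = (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1e-7)

assert np.all(np.abs(normalized.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normalized.var(axis=0) - 1) < 1e-3)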
code_codestyle: 31
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCAmelCase = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } __UpperCAmelCase = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : str = ["input_ids", "attention_mask"] UpperCAmelCase__ : Dict = RobertaTokenizer def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]: super().__init__( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, 
errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) ) UpperCamelCase : List[str] = add_prefix_space UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = add_prefix_space UpperCamelCase : Optional[Any] = 'post_processor' UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase : Optional[Any] = tuple(state['sep'] ) if "cls" in state: UpperCamelCase : Optional[int] = tuple(state['cls'] ) UpperCamelCase : Any = False if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Optional[int] = add_prefix_space UpperCamelCase : List[Any] = True if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets: UpperCamelCase : Dict = trim_offsets UpperCamelCase : Union[str, Any] = True if changes_to_apply: UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) ) UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) @property def snake_case_ ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value UpperCamelCase : List[Any] = value def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs."
) return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple: UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : Dict = [self.sep_token_id] UpperCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
40
0
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(A__ ) , """Tatoeba directory does not exist.""" ) class __UpperCamelCase ( unittest.TestCase ): @cached_property def UpperCamelCase( self ): _UpperCAmelCase = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCamelCase ) @slow def UpperCamelCase( self ): self.resolver.convert_models(['''heb-eng'''] ) @slow def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_UpperCamelCase ) assert mmeta["long_pair"] == "heb-eng"
32
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple: super().__init__(features=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = torch_tensor_kwargs import torch # noqa import torch at initialization def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict: import torch if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column: if all( isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any: import torch if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ): return value elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ): return value.tolist() UpperCamelCase : str = {} if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ): UpperCamelCase : List[str] = {'dtype': torch.intaa} elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ): UpperCamelCase : int = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ): UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ ) return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: import torch # support for torch, tf, jax etc. 
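# objects exposing __array__ (e.g. torch / tf / jax tensors) are converted to numpy arrays first, then tensorized recursively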
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ): UpperCamelCase : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] ) elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ): return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] ) return self._tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int: return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ ) return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor": UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] ) UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for column_name in batch: UpperCamelCase : str = self._consolidate(batch[column_name] ) return batch
40
0
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class __magic_name__ : '''simple docstring''' def __init__( self:str , _a:Optional[Any] , _a:Any=13 , _a:List[str]=7 , _a:str=True , _a:Dict=True , _a:Any=False , _a:Dict=True , _a:int=99 , _a:List[str]=64 , _a:Union[str, Any]=5 , _a:Optional[int]=4 , _a:int=64 , _a:List[str]="gelu" , _a:List[Any]=0.1 , _a:Dict=0.1 , _a:str=5_12 , _a:Optional[int]=16 , _a:Tuple=2 , _a:int=0.02 , _a:Dict=3 , _a:Tuple=4 , _a:Tuple=None , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self:str ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Dict , _a:Dict , _a:Optional[Any] , _a:Dict , _a:Optional[int] , _a:int ): snake_case__ = MPNetModel(config=_a ) model.to(_a ) model.eval() snake_case__ = model(_a , _a ) snake_case__ = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:str , _a:Optional[Any] , _a:Tuple , _a:Tuple , _a:Tuple , _a:Optional[int] ): snake_case__ = MPNetForQuestionAnswering(config=_a ) 
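# the QA head should produce start/end logits of shape (batch_size, seq_length)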
model.to(_a ) model.eval() snake_case__ = model( _a , attention_mask=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:int , _a:List[Any] , _a:Union[str, Any] , _a:Optional[int] , _a:Tuple , _a:List[str] ): snake_case__ = self.num_labels snake_case__ = MPNetForSequenceClassification(_a ) model.to(_a ) model.eval() snake_case__ = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[Any] , _a:List[Any] , _a:Any , _a:int , _a:str , _a:Tuple ): snake_case__ = self.num_choices snake_case__ = MPNetForMultipleChoice(config=_a ) model.to(_a ) model.eval() snake_case__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = model( _a , attention_mask=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int , _a:Optional[Any] , _a:Optional[Any] , _a:Union[str, Any] , _a:List[Any] , _a:List[Any] ): snake_case__ = self.num_labels snake_case__ = MPNetForTokenClassification(config=_a ) model.to(_a ) model.eval() snake_case__ = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = self.prepare_config_and_inputs() ((snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__)) = config_and_inputs snake_case__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Optional[int] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) __lowercase : int = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) __lowercase : Tuple = False __lowercase : Any = True def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = MPNetModelTester(self ) snake_case__ = ConfigTester(self , config_class=_a , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*_a ) def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*_a ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*_a ) def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = self.model_tester.prepare_config_and_inputs() 
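# token classification should produce logits of shape (batch_size, seq_length, num_labels)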
self.model_tester.create_and_check_mpnet_for_token_classification(*_a ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*_a ) @require_torch class __magic_name__ (unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) snake_case__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) snake_case__ = model(_a )[0] snake_case__ = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , _a ) snake_case__ = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
33
from __future__ import annotations import math import numpy as np from numpy.linalg import norm def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float: return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) ) def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]: if dataset.ndim != value_array.ndim: UpperCamelCase : int = ( 'Wrong input data\'s dimensions... ' F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}""" ) raise ValueError(snake_case__ ) try: if dataset.shape[1] != value_array.shape[1]: UpperCamelCase : str = ( 'Wrong input data\'s shape... ' F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}""" ) raise ValueError(snake_case__ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('Wrong shape' ) if dataset.dtype != value_array.dtype: UpperCamelCase : Dict = ( 'Input data have different datatype... ' F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}""" ) raise TypeError(snake_case__ ) UpperCamelCase : List[Any] = [] for value in value_array: UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] ) UpperCamelCase : Dict = dataset[0].tolist() for dataset_value in dataset[1:]: UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ ) if dist > temp_dist: UpperCamelCase : str = temp_dist UpperCamelCase : List[str] = dataset_value.tolist() answer.append([vector, dist] ) return answer def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float: return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ )) if __name__ == "__main__": import doctest doctest.testmod()
40
0
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def __snake_case ( _lowercase ,_lowercase ,_lowercase ): """simple docstring""" UpperCamelCase = 0 if start < end: UpperCamelCase = randint(_lowercase ,_lowercase ) UpperCamelCase = a[end] UpperCamelCase = a[pivot] UpperCamelCase = temp UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase ) count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 ) count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase ) return count def __snake_case ( _lowercase ,_lowercase ,_lowercase ): """simple docstring""" UpperCamelCase = 0 UpperCamelCase = randint(_lowercase ,_lowercase ) UpperCamelCase = a[end] UpperCamelCase = a[pivot] UpperCamelCase = temp UpperCamelCase = start - 1 for index in range(_lowercase ,_lowercase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value UpperCamelCase = new_pivot_index + 1 UpperCamelCase = a[new_pivot_index] UpperCamelCase = a[index] UpperCamelCase = temp UpperCamelCase = a[new_pivot_index + 1] UpperCamelCase = a[end] UpperCamelCase = temp return new_pivot_index + 1, count SCREAMING_SNAKE_CASE_ = TemporaryFile() SCREAMING_SNAKE_CASE_ = 100 # 1000 elements are to be sorted SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array SCREAMING_SNAKE_CASE_ = np.load(outfile) SCREAMING_SNAKE_CASE_ = len(M) - 1 SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
34
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __UpperCAmelCase = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='''relu''')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='''relu''')) classifier.add(layers.Dense(units=1, activation='''sigmoid''')) # Compiling the CNN classifier.compile( optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy'''] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __UpperCAmelCase = train_datagen.flow_from_directory( '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) __UpperCAmelCase = test_datagen.flow_from_directory( '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('''cnn.h5''') # Part 3 - Making new predictions __UpperCAmelCase = tf.keras.preprocessing.image.load_img( '''dataset/single_prediction/image.png''', target_size=(64, 64) ) __UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image) __UpperCAmelCase = np.expand_dims(test_image, axis=0) __UpperCAmelCase = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __UpperCAmelCase = '''Normal''' if result[0][0] == 1: __UpperCAmelCase = '''Abnormality detected'''
40
0
import unittest from knapsack import knapsack as k class lowercase ( unittest.TestCase ): def lowercase__ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 SCREAMING_SNAKE_CASE__ : Optional[Any] = [0] SCREAMING_SNAKE_CASE__ : Tuple = [0] SCREAMING_SNAKE_CASE__ : str = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [60] SCREAMING_SNAKE_CASE__ : Dict = [10] SCREAMING_SNAKE_CASE__ : Dict = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 ) def lowercase__ ( self : List[str] ): SCREAMING_SNAKE_CASE__ : Tuple = 3 SCREAMING_SNAKE_CASE__ : Tuple = [1, 2, 3] SCREAMING_SNAKE_CASE__ : Any = [3, 2, 1] SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 ) def lowercase__ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ : List[str] = 50 SCREAMING_SNAKE_CASE__ : Tuple = [60, 1_00, 1_20] SCREAMING_SNAKE_CASE__ : Any = [10, 20, 30] SCREAMING_SNAKE_CASE__ : int = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 2_20 ) if __name__ == "__main__": unittest.main()
35
import os import pytest from attr import dataclass __UpperCAmelCase = '''us-east-1''' # default region @dataclass class lowerCAmelCase_ : UpperCAmelCase__ : str UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role" UpperCAmelCase__ : Union[str, Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000} @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def snake_case_ ( self ) -> str: return F"""{self.framework}-transformers-test""" @property def snake_case_ ( self ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]: UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
40
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowercase : Optional[int] = logging.get_logger(__name__) __lowercase : List[Any] = { '''post_extract_proj''': '''feature_projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.upsample.0''': '''encoder.upsample.projection''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def lowercase ( __A : Optional[Any] , __A : List[str] , __A : Union[str, Any] , __A : Optional[Any] , __A : str ) -> Union[str, Any]: '''simple docstring''' for attribute in key.split(""".""" ): snake_case : Optional[int] = getattr(__A , __A ) if weight_type is not None: snake_case : Tuple = getattr(__A , __A ).shape else: snake_case : Dict = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case : Optional[int] = value elif weight_type == "weight_g": snake_case : int = value elif weight_type == "weight_v": snake_case : Union[str, Any] = value elif weight_type == "bias": snake_case : int = value else: snake_case : str = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowercase ( __A : Tuple , __A : Optional[int] , __A : int ) -> int: '''simple docstring''' snake_case : Any = [] snake_case : Optional[int] = fairseq_model.state_dict() snake_case : Tuple = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): snake_case : List[Any] = False if "conv_layers" in name: load_conv_layer( __A , __A , __A , __A , hf_model.config.feat_extract_norm == """group""" , ) snake_case : Dict = True else: for key, mapped_key in MAPPING.items(): snake_case : Dict = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case : str = True if "*" in mapped_key: snake_case : Optional[int] = name.split(__A )[0].split(""".""" )[-2] snake_case : Optional[Any] = mapped_key.replace("""*""" , __A ) if "weight_g" in name: snake_case : Dict = """weight_g""" elif "weight_v" in name: snake_case : List[Any] = """weight_v""" elif "weight" in name: snake_case : Tuple = """weight""" elif "bias" in name: snake_case : int = """bias""" else: snake_case : List[Any] = None set_recursively(__A , __A , __A , __A , __A ) continue if not is_used: unused_weights.append(__A ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( __A : List[Any] , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Optional[int] ) -> Any: '''simple docstring''' snake_case : str = full_name.split("""conv_layers.""" )[-1] snake_case : List[Any] = name.split(""".""" ) snake_case : Tuple = int(items[0] ) snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case : str = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) snake_case : Optional[int] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case : Optional[int] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__A ) def lowercase ( __A : Optional[Any] , __A : List[str] ) -> str: '''simple docstring''' snake_case : Tuple = SEWConfig() if is_finetuned: snake_case : Dict = model.wav_encoder.wav_model.cfg else: snake_case : str = model.cfg snake_case : Union[str, Any] = fs_config.conv_bias snake_case : Any = eval(fs_config.conv_feature_layers ) snake_case : Any = [x[0] for x in conv_layers] snake_case : Dict = [x[1] for x in conv_layers] snake_case : Optional[int] = [x[2] for x in conv_layers] snake_case : str = """gelu""" snake_case : Tuple = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" snake_case : List[Any] = 0.0 snake_case : int = fs_config.activation_fn.name snake_case : Any = fs_config.encoder_embed_dim snake_case : Optional[int] = 0.02 snake_case : Dict = fs_config.encoder_ffn_embed_dim snake_case : int = 1E-5 snake_case : List[Any] = fs_config.encoder_layerdrop snake_case : List[Any] = fs_config.encoder_attention_heads snake_case : Dict = fs_config.conv_pos_groups snake_case : Dict = fs_config.conv_pos snake_case : Union[str, Any] = len(__A ) snake_case : Dict = fs_config.encoder_layers snake_case : Tuple = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: snake_case : List[Any] = model.cfg snake_case : Optional[int] = fs_config.final_dropout snake_case : List[str] = fs_config.layerdrop snake_case : int = fs_config.activation_dropout snake_case : List[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 snake_case : Union[str, Any] = fs_config.attention_dropout snake_case : List[Any] = fs_config.dropout_input snake_case : List[Any] = fs_config.dropout snake_case : int = fs_config.mask_channel_length snake_case : Any = fs_config.mask_channel_prob snake_case : Dict = fs_config.mask_length snake_case : Dict = fs_config.mask_prob snake_case : Optional[Any] = """Wav2Vec2FeatureExtractor""" snake_case : Tuple = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def lowercase ( __A : Any , __A : str , __A : Any=None , __A : int=None , __A : Union[str, Any]=True ) -> List[str]: '''simple docstring''' if is_finetuned: snake_case , snake_case , snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case , snake_case , snake_case : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: snake_case : int = SEWConfig.from_pretrained(__A ) else: snake_case : List[str] = convert_config(model[0] , __A ) snake_case : Optional[Any] = model[0].eval() snake_case : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False snake_case : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , ) if is_finetuned: if dict_path: snake_case : str = Dictionary.load(__A ) # 
important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case : Optional[Any] = target_dict.pad_index snake_case : List[Any] = target_dict.bos_index snake_case : Union[str, Any] = target_dict.pad_index snake_case : Tuple = target_dict.bos_index snake_case : Union[str, Any] = target_dict.eos_index snake_case : str = len(target_dict.symbols ) snake_case : Tuple = os.path.join(__A , """vocab.json""" ) if not os.path.isdir(__A ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) ) return os.makedirs(__A , exist_ok=__A ) with open(__A , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , __A ) snake_case : int = WavaVecaCTCTokenizer( __A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__A , ) snake_case : Tuple = WavaVecaProcessor(feature_extractor=__A , tokenizer=__A ) processor.save_pretrained(__A ) snake_case : Any = SEWForCTC(__A ) else: snake_case : Tuple = SEWModel(__A ) feature_extractor.save_pretrained(__A ) recursively_load_weights(__A , __A , __A ) hf_model.save_pretrained(__A ) if __name__ == "__main__": __lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __lowercase : Dict = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
36
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = '''src/transformers''' __UpperCAmelCase = '''docs/source/en/tasks''' def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]: with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: UpperCamelCase : Optional[Any] = f.readlines() # Find the start prompt. UpperCamelCase : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 UpperCamelCase : Optional[Any] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]: UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide] UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) UpperCamelCase : Tuple = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , ) UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ' to fix this.' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
40
0
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase : Tuple = logging.get_logger(__name__) class A__ ( A__ ): """simple docstring""" _lowercase = ['input_features', 'attention_mask'] def __init__( self : str , lowerCamelCase__ : List[str]=80 , lowerCamelCase__ : Any=16_000 , lowerCamelCase__ : int=80 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Tuple=True , **lowerCamelCase__ : Optional[int] , ): super().__init__(feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , **lowerCamelCase__ ) a__ : Dict = num_mel_bins a__ : Optional[int] = do_ceptral_normalize a__ : List[str] = normalize_means a__ : Any = normalize_vars a__ : Optional[Any] = True def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : np.ndarray , ): a__ : Tuple = waveform * (2**15) # Kaldi compliance: 16-bit signed integers a__ : Union[str, Any] = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ) a__ : str = ta_kaldi.fbank(lowerCamelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _UpperCamelCase( lowerCamelCase__ : np.ndarray , lowerCamelCase__ : int , lowerCamelCase__ : Optional[bool] = True , lowerCamelCase__ : Optional[bool] = True , lowerCamelCase__ : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: a__ : List[str] = x[:input_length].mean(axis=0 ) a__ : Dict = np.subtract(lowerCamelCase__ , lowerCamelCase__ ) if normalize_vars: a__ : Any = x[:input_length].std(axis=0 ) a__ : List[str] = np.divide(lowerCamelCase__ , lowerCamelCase__ ) if input_length < x.shape[0]: a__ : str = padding_value # make sure array is in float32 a__ : List[Any] = x.astype(np.floataa ) return x def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[np.ndarray] , lowerCamelCase__ : Optional[np.ndarray] = None ): a__ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(lowerCamelCase__ , lowerCamelCase__ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(lowerCamelCase__ , lowerCamelCase__ ) ] def __call__( self : str , lowerCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , **lowerCamelCase__ : List[Any] , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) a__ : List[str] = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) a__ : Dict = is_batched_numpy or ( isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a__ : str = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ): a__ : Tuple = np.asarray(lowerCamelCase__ , dtype=np.floataa ) elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a__ : List[str] = [raw_speech] # extract fbank features a__ : Any = [self._extract_fbank_features(lowerCamelCase__ ) for waveform in raw_speech] # convert into correct format for padding a__ : Any = BatchFeature({"input_features": features} ) a__ : Any = self.pad( lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , ) # make sure list is in array format a__ : List[str] = padded_inputs.get("input_features" ) if isinstance(input_features[0] , lowerCamelCase__ ): a__ : Optional[int] = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_features] a__ : Dict = padded_inputs.get("attention_mask" ) if attention_mask is not None: a__ : Optional[int] = [np.asarray(lowerCamelCase__ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: a__ : Optional[Any] = ( np.array(lowerCamelCase__ , dtype=np.intaa ) if self._get_padding_strategies(lowerCamelCase__ , max_length=lowerCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD else None ) a__ : Optional[Any] = self.normalize( padded_inputs["input_features"] , attention_mask=lowerCamelCase__ ) if return_tensors is not None: a__ : Optional[Any] = padded_inputs.convert_to_tensors(lowerCamelCase__ ) return padded_inputs
37
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : int = IFPipeline UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} def snake_case_ ( self ) -> str: return self._get_dummy_components() def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case_ ( self ) -> Optional[int]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' ) def snake_case_ ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def snake_case_ ( self ) -> Dict: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def snake_case_ ( self ) -> Optional[int]: self._test_save_load_local() def snake_case_ ( self ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def snake_case_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self ) -> List[Any]: # if UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa ) UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCamelCase : int = None UpperCamelCase : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components ) UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components ) UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 UpperCamelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Tuple = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) 
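# peak GPU memory for the 64x64 img2img stage should stay below 10 GB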
UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : List[Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) 
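# compare the stage-II inpainting output against the reference image stored on the Hub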
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )


def _start_torch_memory_measurement() -> None:
    # Reset the CUDA allocator statistics so the max_memory_allocated()
    # readings inside the tests above cover only the stage that follows.
    # (This name matches the call sites in the tests above; the original
    # definition was left with a placeholder name and a wrong return type.)
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
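# A minimal sketch (not part of the test suite) of how the helper above is
# meant to bracket a peak-memory measurement. Assumes a CUDA device; the
# allocation is a hypothetical stand-in for running one pipeline stage.
_start_torch_memory_measurement()
workload = torch.empty(1024, 1024, device="cuda")
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes >= workload.numel() * workload.element_size()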
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = KandinskyVaaPipeline lowerCamelCase__ = [ '''image_embeds''', '''negative_image_embeds''', ] lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds'''] lowerCamelCase__ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCamelCase__ = False @property def __UpperCamelCase ( self ): return 3_2 @property def __UpperCamelCase ( self ): return 3_2 @property def __UpperCamelCase ( self ): return self.time_input_dim @property def __UpperCamelCase ( self ): return self.time_input_dim * 4 @property def __UpperCamelCase ( self ): return 1_0_0 @property def __UpperCamelCase ( self ): torch.manual_seed(0 ) snake_case__ : Union[str, Any] = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } snake_case__ : Dict = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE ) return model @property def __UpperCamelCase ( self ): return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase ( self ): torch.manual_seed(0 ) snake_case__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase ( self ): snake_case__ : Union[str, Any] = self.dummy_unet snake_case__ : Union[str, Any] = self.dummy_movq snake_case__ : List[Any] = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__SCREAMING_SNAKE_CASE , ) snake_case__ : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ): snake_case__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , 
rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) snake_case__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __SCREAMING_SNAKE_CASE ) if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): snake_case__ : int = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: snake_case__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) snake_case__ : Union[str, Any] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 6_4, """width""": 6_4, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __UpperCamelCase ( self ): snake_case__ : str = """cpu""" snake_case__ : Dict = self.get_dummy_components() snake_case__ : int = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) snake_case__ : Dict = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) snake_case__ : int = output.images snake_case__ : Dict = pipe( **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0] snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1] snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case__ : Tuple = np.array( [0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self ): snake_case__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" ) snake_case__ : int = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__SCREAMING_SNAKE_CASE ) snake_case__ : str = KandinskyVaaPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) snake_case__ : List[str] = pipeline.to(__SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : int = """red cat, 4k photo""" snake_case__ : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 ) snake_case__ , snake_case__ : Optional[Any] = pipe_prior( __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() snake_case__ : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 ) snake_case__ : Optional[int] = pipeline( image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , output_type="""np""" , ) snake_case__ : Optional[int] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def UpperCamelCase ( snake_case__ : Tuple="" ) -> str: UpperCamelCase : Union[str, Any] = tempfile.mkdtemp() return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> int: UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) # Ensure that the file contains the same value as the original tensor UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) ) def snake_case_ ( self ) -> Any: UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' ) sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 ) UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ ) @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) ) UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw(), Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type 
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = 'Hey!' UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() ) self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device lowerCAmelCase_ = False class snake_case_ ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Tuple ) ->Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__( self : Dict ) ->List[str]: snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) # remove text_unet pipe.remove_unused_weights() pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = '''A painting of a squirrel eating a burger ''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_UpperCamelCase ) snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = generator.manual_seed(0 ) snake_case_ = pipe( prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def snake_case__( self : List[str] ) ->Tuple: snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained( '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = '''A painting of a squirrel eating a burger ''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images snake_case_ = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) snake_case_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1,
    using the factorial number system. (Names reconstructed; the original
    definition reused one placeholder name for both parameters.)"""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
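# Example calls for the reconstruction above. k = 5 selects the last of the
# 3! = 6 permutations of (0, 1, 2), i.e. the fully reversed sequence.
assert kth_permutation(0, 4) == [0, 1, 2, 3]
assert kth_permutation(5, 3) == [2, 1, 0]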
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Tombstone left in a bucket after deletion; falsy so probing skips it."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing. (Class and helper
    names reconstructed from the __repr__ below; the original definition
    reused one placeholder name for several distinct parameters.)"""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
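# A short usage example for the map above: inserts, an overwrite, and a
# deletion that leaves a tombstone (_deleted) in the probe sequence.
hm = HashMap(initial_block_size=4)
hm[1] = "one"
hm[2] = "two"
hm[1] = "uno"  # overwriting an existing key keeps len() unchanged
assert hm[1] == "uno" and len(hm) == 2
del hm[2]
assert 2 not in hm and len(hm) == 1
assert repr(hm) == "HashMap(1: uno)"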
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase_ ( a__ ): def snake_case_ ( self ) -> Tuple: UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) ) class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any: UpperCamelCase : int = parent UpperCamelCase : int = batch_size UpperCamelCase : List[Any] = image_size UpperCamelCase : List[str] = patch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 ) UpperCamelCase : List[str] = hidden_act UpperCamelCase : Optional[int] = conv_kernel_size UpperCamelCase : List[str] = output_stride UpperCamelCase : Union[str, Any] = classifier_dropout_prob UpperCamelCase : List[Any] = use_labels UpperCamelCase : Any = is_training UpperCamelCase : int = num_labels UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Tuple = scope UpperCamelCase : List[str] = width_multiplier UpperCamelCase : Any = ffn_dropout UpperCamelCase : List[Any] = attn_dropout def snake_case_ ( self ) -> int: UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[str] = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels ) UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCamelCase : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def snake_case_ ( self ) -> int: return MobileViTVaConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Optional[int] = self.num_labels UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Any = self.num_labels UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : Tuple = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase__ : Any = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Dict = MobileViTVaModelTester(self ) UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def snake_case_ ( self ) -> int: pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def snake_case_ ( self ) -> str: pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> List[str]: UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : str = [*signature.parameters.keys()] UpperCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Tuple: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Tuple = outputs.hidden_states UpperCamelCase : Dict = 5 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCamelCase : Any = 2 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : Optional[int] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> str: UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) @slow def snake_case_ ( self ) -> Optional[Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Tuple: UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def snake_case_ ( self ) -> str: return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : 
Any = prepare_img() UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Union[str, Any] = prepare_img() UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ], device=SCREAMING_SNAKE_CASE_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Tuple = prepare_img() UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = outputs.logits.detach().cpu() UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] ) UpperCamelCase : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
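# A standalone check (illustrative only) of the feature-map geometry that
# check_hidden_states_output asserts above: five stages, each halving the
# spatial resolution of a 64x64 input.
image_size, divisor = 64, 2
expected_shapes = []
for _ in range(5):
    expected_shapes.append((image_size // divisor, image_size // divisor))
    divisor *= 2
assert expected_shapes == [(32, 32), (16, 16), (8, 8), (4, 4), (2, 2)]
assert divisor // 2 == 32  # matches the tester's output_stride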
def solution(n: int = 4_000_000) -> int:
    """Project Euler 2: sum of the even-valued Fibonacci terms not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
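# Sanity check: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34.
assert solution(100) == 2 + 8 + 34 == 44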
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the length (in vertices) of the longest path in a DAG, found via
    Kahn's topological sort. (Function and variable names restored so the
    call below actually resolves.)"""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
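# For the sample graph above this prints 5: long_dist counts vertices, and
# the longest chain is 0 -> 3 -> 5 -> 6 -> 7.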
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
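# Worked example: for n = 10 the sum of squares is 385 and the square of the
# sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 3025 - 385 == 2640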
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
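# The module above follows the Transformers lazy-import pattern: public names
# are registered in _import_structure and only resolved on first attribute
# access. Below is a minimal standalone sketch of that deferral (PEP 562
# module-level __getattr__); the real _LazyModule is more involved, so treat
# this as an illustration rather than its implementation.
import importlib

_import_structure = {"configuration_mra": ["MraConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # Import the submodule lazily and pull the requested attribute from it.
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)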
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : Tuple = torch.exp(_lowerCAmelCase ) _lowerCamelCase : int = torch.sum(_lowerCAmelCase , dim=1 ) # sum of exp(x_i) _lowerCamelCase : Optional[int] = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(_lowerCAmelCase ) - B / A class UpperCAmelCase__ ( nn.Module ): def __init__( self : Tuple,__A : List[Any] ): super().__init__() _lowerCamelCase : Optional[int] = config.output_attentions _lowerCamelCase : Any = config.output_hidden_states _lowerCamelCase : str = nn.ModuleList([BertLayer(__A ) for _ in range(config.num_hidden_layers )] ) _lowerCamelCase : str = nn.ModuleList([BertHighway(__A ) for _ in range(config.num_hidden_layers )] ) _lowerCamelCase : List[str] = [-1 for _ in range(config.num_hidden_layers )] def lowerCamelCase_ ( self : List[Any],__A : int ): if (type(__A ) is float) or (type(__A ) is int): for i in range(len(self.early_exit_entropy ) ): _lowerCamelCase : List[Any] = x else: _lowerCamelCase : List[str] = x def lowerCamelCase_ ( self : Optional[int],__A : Union[str, Any] ): _lowerCamelCase : List[str] = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def lowerCamelCase_ ( self : List[str],__A : int,__A : Any=None,__A : Any=None,__A : int=None,__A : Optional[int]=None,): _lowerCamelCase : Tuple = () _lowerCamelCase : Tuple = () _lowerCamelCase : Optional[Any] = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _lowerCamelCase : int = all_hidden_states + (hidden_states,) _lowerCamelCase : List[Any] = layer_module( __A,__A,head_mask[i],__A,__A ) _lowerCamelCase : Dict = layer_outputs[0] if self.output_attentions: _lowerCamelCase : str = all_attentions + (layer_outputs[1],) _lowerCamelCase : List[str] = (hidden_states,) if self.output_hidden_states: _lowerCamelCase : str = current_outputs + (all_hidden_states,) if self.output_attentions: _lowerCamelCase : Dict = current_outputs + (all_attentions,) _lowerCamelCase : Any = self.highway[i](__A ) # logits, pooled_output if not self.training: _lowerCamelCase : int = highway_exit[0] _lowerCamelCase : Optional[Any] = entropy(__A ) _lowerCamelCase : Tuple = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _lowerCamelCase : List[str] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _lowerCamelCase : int = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(__A,i + 1 ) else: _lowerCamelCase : int = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _lowerCamelCase : Optional[Any] = all_hidden_states + (hidden_states,) _lowerCamelCase : List[Any] = (hidden_states,) if self.output_hidden_states: _lowerCamelCase : int = outputs + (all_hidden_states,) if self.output_attentions: _lowerCamelCase : Optional[int] = outputs + (all_attentions,) _lowerCamelCase : Tuple = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( 'The Bert Model transformer with 
early exiting (DeeBERT). ' , A , ) class UpperCAmelCase__ ( A ): def __init__( self : Any,__A : int ): super().__init__(__A ) _lowerCamelCase : str = config _lowerCamelCase : str = BertEmbeddings(__A ) _lowerCamelCase : Tuple = DeeBertEncoder(__A ) _lowerCamelCase : List[str] = BertPooler(__A ) self.init_weights() def lowerCamelCase_ ( self : Dict ): self.encoder.init_highway_pooler(self.pooler ) def lowerCamelCase_ ( self : Tuple ): return self.embeddings.word_embeddings def lowerCamelCase_ ( self : Any,__A : Optional[int] ): _lowerCamelCase : str = value def lowerCamelCase_ ( self : Tuple,__A : int ): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(__A ) @add_start_docstrings_to_model_forward(__A ) def lowerCamelCase_ ( self : Optional[int],__A : List[str]=None,__A : int=None,__A : Union[str, Any]=None,__A : Tuple=None,__A : Dict=None,__A : int=None,__A : Dict=None,__A : List[str]=None,): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: _lowerCamelCase : List[str] = input_ids.size() elif inputs_embeds is not None: _lowerCamelCase : Optional[int] = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) _lowerCamelCase : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _lowerCamelCase : List[str] = torch.ones(__A,device=__A ) if encoder_attention_mask is None: _lowerCamelCase : List[Any] = torch.ones(__A,device=__A ) if token_type_ids is None: _lowerCamelCase : List[str] = torch.zeros(__A,dtype=torch.long,device=__A ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
_lowerCamelCase : torch.Tensor = self.get_extended_attention_mask(__A,__A,__A ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _lowerCamelCase : List[str] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _lowerCamelCase : Optional[int] = encoder_attention_mask[:, None, None, :] _lowerCamelCase : Union[str, Any] = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _lowerCamelCase : Any = (1.0 - encoder_extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _lowerCamelCase : Union[str, Any] = self.get_head_mask(__A,self.config.num_hidden_layers ) _lowerCamelCase : List[Any] = self.embeddings( input_ids=__A,position_ids=__A,token_type_ids=__A,inputs_embeds=__A ) _lowerCamelCase : Union[str, Any] = self.encoder( __A,attention_mask=__A,head_mask=__A,encoder_hidden_states=__A,encoder_attention_mask=__A,) _lowerCamelCase : Union[str, Any] = encoder_outputs[0] _lowerCamelCase : Tuple = self.pooler(__A ) _lowerCamelCase : Optional[int] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class UpperCAmelCase__ ( A ): def __init__( self : List[Any],__A : int,__A : int ): _lowerCamelCase : Tuple = message _lowerCamelCase : Union[str, Any] = exit_layer # start from 1! class UpperCAmelCase__ ( nn.Module ): def __init__( self : int,__A : List[Any] ): super().__init__() _lowerCamelCase : List[Any] = BertPooler(__A ) _lowerCamelCase : List[Any] = nn.Dropout(config.hidden_dropout_prob ) _lowerCamelCase : Tuple = nn.Linear(config.hidden_size,config.num_labels ) def lowerCamelCase_ ( self : Tuple,__A : Union[str, Any] ): # Pooler _lowerCamelCase : int = encoder_outputs[0] _lowerCamelCase : Tuple = self.pooler(__A ) # "return" pooler_output # BertModel _lowerCamelCase : Any = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _lowerCamelCase : List[Any] = bmodel_output[1] _lowerCamelCase : Optional[int] = self.dropout(__A ) _lowerCamelCase : Optional[Any] = self.classifier(__A ) return logits, pooled_output @add_start_docstrings( 'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. 
' , A , ) class UpperCAmelCase__ ( A ): def __init__( self : str,__A : Optional[int] ): super().__init__(__A ) _lowerCamelCase : Tuple = config.num_labels _lowerCamelCase : Dict = config.num_hidden_layers _lowerCamelCase : Union[str, Any] = DeeBertModel(__A ) _lowerCamelCase : List[Any] = nn.Dropout(config.hidden_dropout_prob ) _lowerCamelCase : int = nn.Linear(config.hidden_size,self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(__A ) def lowerCamelCase_ ( self : Dict,__A : Any=None,__A : Optional[Any]=None,__A : Union[str, Any]=None,__A : Union[str, Any]=None,__A : Optional[int]=None,__A : Optional[int]=None,__A : Union[str, Any]=None,__A : Optional[int]=-1,__A : Any=False,): _lowerCamelCase : Optional[Any] = self.num_layers try: _lowerCamelCase : Optional[Any] = self.bert( __A,attention_mask=__A,token_type_ids=__A,position_ids=__A,head_mask=__A,inputs_embeds=__A,) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits _lowerCamelCase : Union[str, Any] = outputs[1] _lowerCamelCase : List[Any] = self.dropout(__A ) _lowerCamelCase : str = self.classifier(__A ) _lowerCamelCase : int = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCamelCase : Tuple = e.message _lowerCamelCase : Optional[int] = e.exit_layer _lowerCamelCase : Optional[Any] = outputs[0] if not self.training: _lowerCamelCase : List[Any] = entropy(__A ) _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCamelCase : List[Any] = MSELoss() _lowerCamelCase : Tuple = loss_fct(logits.view(-1 ),labels.view(-1 ) ) else: _lowerCamelCase : Optional[int] = CrossEntropyLoss() _lowerCamelCase : Optional[int] = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) ) # work with highway exits _lowerCamelCase : Union[str, Any] = [] for highway_exit in outputs[-1]: _lowerCamelCase : Any = highway_exit[0] if not self.training: highway_logits_all.append(__A ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowerCamelCase : List[str] = MSELoss() _lowerCamelCase : str = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) ) else: _lowerCamelCase : Optional[int] = CrossEntropyLoss() _lowerCamelCase : str = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) ) highway_losses.append(__A ) if train_highway: _lowerCamelCase : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowerCamelCase : Union[str, Any] = (loss,) + outputs if not self.training: _lowerCamelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowerCamelCase : str = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
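# Numeric illustration of the entropy-based early-exit rule used by the
# highway layers above. The helper is restated here so the snippet runs on
# its own; the threshold value is hypothetical.
import torch


def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
    # H(softmax(x)) = log(sum exp x) - sum(x * exp x) / sum(exp x),
    # the same formula as the entropy helper at the top of the module.
    exp_x = torch.exp(x)
    a = torch.sum(exp_x, dim=1)
    b = torch.sum(x * exp_x, dim=1)
    return torch.log(a) - b / a


confident = torch.tensor([[8.0, 0.0, 0.0]])  # peaked logits -> entropy ~ 0.006
uncertain = torch.tensor([[1.0, 1.0, 1.0]])  # uniform logits -> entropy = ln 3
threshold = 0.5  # hypothetical early_exit_entropy value
assert softmax_entropy(confident).item() < threshold   # exit at this layer
assert softmax_entropy(uncertain).item() >= threshold  # keep computing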
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Submodule and class names restored to the Pix2Struct spelling declared
    # in _import_structure above (the imports had drifted to "pixastruct").
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower @ upper.
    (Names and summation ranges reconstructed; the original mixed a placeholder
    parameter name with references to `table`.)"""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Entries of the lower factor to the left of the diagonal.
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Entries of the upper factor on and to the right of the diagonal.
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
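# Usage example for the reconstruction above: the factors satisfy
# lower @ upper == table, with a unit diagonal on the lower factor.
matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
assert np.allclose(lower @ upper, matrix)
assert np.allclose(np.diag(lower), 1.0)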
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) @dataclass class A_ ( _a ): lowerCAmelCase__ = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self: Dict ,**__lowerCAmelCase: str ): '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _lowerCamelCase : str = deprecated_arg[3:] setattr(self ,__lowerCAmelCase ,not kwargs.pop(__lowerCAmelCase ) ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) _lowerCamelCase : Optional[Any] = kwargs.pop("torchscript" ,self.torchscript ) _lowerCamelCase : List[str] = kwargs.pop("torch_xla_tpu_print_metrics" ,self.torch_xla_tpu_print_metrics ) _lowerCamelCase : Optional[int] = kwargs.pop("fp16_opt_level" ,self.fpaa_opt_level ) super().__init__(**__lowerCAmelCase ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'Trace the models using torchscript'} ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'Print Xla/PyTorch tpu metrics'} ) lowerCAmelCase__ = field( default='O1' , metadata={ 'help': ( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ' 'See details at https://nvidia.github.io/apex/amp.html' ) } , ) @cached_property def _lowercase ( self: Tuple ): '''simple docstring''' requires_backends(self ,["torch"] ) logger.info("PyTorch: setting up devices" ) if not self.cuda: _lowerCamelCase : Optional[int] = torch.device("cpu" ) _lowerCamelCase : Optional[int] = 0 elif is_torch_tpu_available(): _lowerCamelCase : Union[str, Any] = xm.xla_device() _lowerCamelCase : Tuple = 0 else: _lowerCamelCase : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _lowerCamelCase : Optional[Any] = torch.cuda.device_count() return device, n_gpu @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return is_torch_tpu_available() and self.tpu @property def _lowercase ( self: Any ): '''simple docstring''' requires_backends(self ,["torch"] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def _lowercase ( self: Dict ): '''simple docstring''' requires_backends(self ,["torch"] ) return self._setup_devices[0] @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' requires_backends(self ,["torch"] ) return self._setup_devices[1] @property def _lowercase ( self: Any ): '''simple docstring''' return self.n_gpu > 0
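# Minimal standalone illustration (hypothetical helper, not part of the
# class above) of the deprecated-argument handling in its __init__: a legacy
# no_xxx=True flag becomes the positive attribute xxx=False.
def translate_deprecated(kwargs: dict) -> dict:
    out = dict(kwargs)
    for deprecated_arg in ["no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory"]:
        if deprecated_arg in out:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            out[positive_arg] = not out.pop(deprecated_arg)
    return out


assert translate_deprecated({"no_cuda": True}) == {"cuda": False}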
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''ViTFeatureExtractor'''] __UpperCAmelCase = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
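A minimal usage sketch of the config above, assuming it is exposed as SEWDConfig (per the "sew-d" model type); the derived attributes are the interesting part:

# Sketch: the feature-extractor layer count and the samples-per-frame ratio
# are derived from conv_dim / conv_stride rather than passed explicitly.
config = SEWDConfig(hidden_size=256, num_hidden_layers=4)
assert config.num_feat_extract_layers == len(config.conv_dim) == 13
# Default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiply out to 320
# input samples per output frame.
assert config.inputs_to_logits_ratio == 320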
47
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
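The zero-mean/unit-variance checks above verify plain per-utterance standardization; the same property in a self-contained numpy sketch (the 1e-7 epsilon is an assumption about the stabilizer the feature extractor uses):

import numpy as np

# Standardize a raw waveform the way do_normalize=True is expected to:
# subtract the mean, divide by the (stabilized) standard deviation.
rng = np.random.default_rng(0)
waveform = rng.normal(loc=3.0, scale=2.0, size=1000).astype(np.float32)
normalized = (waveform - waveform.mean()) / np.sqrt(waveform.var() + 1e-7)

# The same bounds the tests above assert.
assert abs(normalized.mean()) < 1e-3
assert abs(normalized.var() - 1) < 1e-3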
40
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
48
def decimal_to_binary(num: int) -> str:
    """Convert a decimal integer to its binary string representation with a 0b prefix."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
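A few spot checks of the converter above, cross-checked against Python's built-in bin():

# Usage sketch, including the negative-number path.
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(2) == "0b10"
assert decimal_to_binary(7) == "0b111"
assert decimal_to_binary(-5) == "-0b101"
assert decimal_to_binary(35) == bin(35) == "0b100011"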
40
0
"""simple docstring""" import operator as op _lowercase : List[str] = 'scaler.pt' _lowercase : Tuple = 'pytorch_model' _lowercase : Tuple = 'random_states' _lowercase : Optional[Any] = 'optimizer' _lowercase : str = 'scheduler' _lowercase : List[str] = 'pytorch_model.bin' _lowercase : Dict = 'pytorch_model.bin.index.json' _lowercase : Any = 'model.safetensors' _lowercase : Dict = 'model.safetensors.index.json' _lowercase : Optional[int] = '1.10.2' _lowercase : Tuple = 'py38' _lowercase : Dict = '4.17.0' _lowercase : List[Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge'] _lowercase : Optional[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2'] _lowercase : List[str] = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP'] _lowercase : List[str] = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH'] _lowercase : Optional[Any] = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] _lowercase : Optional[Any] = '2.0.1' _lowercase : Tuple = ['pdsh', 'standard', 'openmpi', 'mvapich'] _lowercase : Tuple = ['default', 'reduce-overhead', 'max-autotune'] _lowercase : List[Any] = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 _lowercase : Dict = [ 'nnodes', 'nproc_per_node', 'rdzv_backend', 'rdzv_endpoint', 'rdzv_id', 'rdzv_conf', 'standalone', 'max_restarts', 'monitor_interval', 'start_method', 'role', 'module', 'm', 'no_python', 'run_path', 'log_dir', 'r', 'redirects', 't', 'tee', 'node_rank', 'master_addr', 'master_port', ] _lowercase : int = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM'] _lowercase : str = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
49
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __UpperCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: UpperCamelCase : List[Any] = tensor_name.split('.' ) for split in splits[:-1]: UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) UpperCamelCase : Dict = new_module UpperCamelCase : int = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) UpperCamelCase : Union[str, Any] = tensor_name in module._buffers UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) UpperCamelCase : Optional[Any] = False UpperCamelCase : str = False if is_buffer or not is_bitsandbytes_available(): UpperCamelCase : List[str] = False UpperCamelCase : Tuple = False else: UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCamelCase : List[Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCamelCase : Dict = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[Any] = value.to('cpu' ) if value.dtype == torch.inta: UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: UpperCamelCase : Union[str, Any] = new_value.T UpperCamelCase : Union[str, Any] = old_value.__dict__ if is_abit: UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) UpperCamelCase : Dict = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[str] = value.to(snake_case__ ) else: UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: UpperCamelCase : Optional[int] = new_value else: UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) UpperCamelCase : List[str] = new_value def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int: for name, module in model.named_children(): if current_key_name is None: UpperCamelCase : str = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape else: UpperCamelCase : Any = module.in_features UpperCamelCase : List[str] = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCamelCase : Any = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCamelCase : Optional[int] = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCamelCase : str = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCamelCase : int = True # Store the module class in case we need to transpose the weight later UpperCamelCase : Any = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]: UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , 
snake_case__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]: UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCamelCase : List[str] = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] ) UpperCamelCase : Optional[int] = len(snake_case__ ) > 0 # Check if it is a base model UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCamelCase : List[Any] = list(model.named_children() ) UpperCamelCase : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ ) UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys UpperCamelCase : Tuple = ['.weight', '.bias'] UpperCamelCase : Tuple = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
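The replacement helpers above share one pattern: recurse over named_children, swap matching modules in _modules, and track the dotted key path. A dependency-light sketch of that pattern with a toy wrapper class (LoggedLinear is illustrative, not part of transformers or bitsandbytes):

import torch.nn as nn


class LoggedLinear(nn.Linear):
    """Toy stand-in for a quantized linear layer."""


def replace_linear(model: nn.Module, current_key_name=None):
    # Walk direct children, swap nn.Linear instances in-place, and recurse,
    # tracking the dotted module path the way _replace_with_bnb_linear does.
    if current_key_name is None:
        current_key_name = []
    for name, module in model.named_children():
        current_key_name.append(name)
        if isinstance(module, nn.Linear):
            model._modules[name] = LoggedLinear(module.in_features, module.out_features, module.bias is not None)
        elif len(list(module.children())) > 0:
            replace_linear(module, current_key_name)
        current_key_name.pop(-1)
    return model


model = nn.Sequential(nn.Linear(4, 8), nn.Sequential(nn.ReLU(), nn.Linear(8, 2)))
replace_linear(model)
assert all(isinstance(m, LoggedLinear) for m in model.modules() if isinstance(m, nn.Linear))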
40
0
from ..utils import DummyObject, requires_backends


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
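The dummy classes above only work because DummyObject is a metaclass that intercepts use of the class itself; a minimal self-contained sketch of that pattern (OnnxThing and its call are made-up examples):

class DummyObject(type):
    # Any attribute access on a class that uses this metaclass reports the
    # missing backends instead of failing with a confusing error later.
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires backends: {cls._backends}")


class OnnxThing(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]


try:
    OnnxThing.from_pretrained("some/model")  # hypothetical call
except ImportError as err:
    print(err)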
50
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCamelCase ( snake_case__ : int ) -> Dict: UpperCamelCase : Optional[Any] = tmp_path / 'file.csv' UpperCamelCase : Optional[Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]: UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv' UpperCamelCase : Any = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str: UpperCamelCase : Any = tmp_path / 'csv_with_image.csv' UpperCamelCase : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple: UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv' UpperCamelCase : Dict = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Dict ) -> List[str]: UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv' UpperCamelCase : Union[str, Any] = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]: UpperCamelCase : str = Csv() UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(snake_case__ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(snake_case__ ) in record.message for record in caplog.records ) @require_pil def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : List[str] = f.read().splitlines()[1] UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] ) UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() UpperCamelCase : str = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCamelCase ( snake_case__ : Any ) -> str: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : Any = f.read().splitlines()[1:] UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] ) UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() UpperCamelCase : List[str] = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels] def UpperCamelCase ( snake_case__ : str ) -> 
List[Any]: UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} ) UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] ) UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) UpperCamelCase : str = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
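The int_list fixture above feeds a converter that splits each cell into integers; a standalone sketch of the same converter pattern, using pandas purely for illustration:

import io

import pandas as pd

raw = "int_list\n1 2 3\n4 5 6\n7 8 9\n"

# A converter turns each cell of the named column into a list of ints,
# mirroring the lambda passed via Csv(converters=...) above.
df = pd.read_csv(io.StringIO(raw), converters={"int_list": lambda cell: [int(i) for i in cell.split()]})

assert df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]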
40
0
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a__ : Dict = HfApi() a__ : List[str] = {} # fmt: off a__ : Dict = torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) a__ : Dict = torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) a__ : str = torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) a__ : Dict = torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) a__ : Any = torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) a__ : Optional[int] = torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) a__ : Optional[int] = torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) a__ : str = torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) a__ : Union[str, Any] = torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) a__ : int = torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) a__ : List[str] = torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, -2.2_053, -2.6_287, 
1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) a__ : Union[str, Any] = torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066 ]) a__ : Any = torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) a__ : List[Any] = torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) a__ : List[Any] = torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on a__ : Optional[Any] = api.list_models(filter='diffusers') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a__ : Tuple = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith('CompVis'): a__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet') else: a__ : List[Any] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a__ : Tuple = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a__ : int = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a__ : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
51
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative if deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
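The deriv=True branch above expects an already-activated value s and returns s * (1 - s); a quick numerical check of that identity against a finite difference:

x = 0.7
s = sigmoid_function(x)                      # forward value in (0, 1)
analytic = sigmoid_function(s, deriv=True)   # s * (1 - s)

h = 1e-6
numeric = (sigmoid_function(x + h) - sigmoid_function(x - h)) / (2 * h)  # central difference

assert abs(analytic - numeric) < 1e-6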
40
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
52
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
40
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: _snake_case : List[str] = None _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : List[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} _snake_case : int = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json' ), }, } _snake_case : List[str] = { 'moussaKam/mbarthez': 1024, 'moussaKam/barthez': 1024, 'moussaKam/barthez-orangesum-title': 1024, } _snake_case : str = '▁' class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = BarthezTokenizer def __init__( self : Any , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[str]="<s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : int="<mask>" , **lowerCAmelCase_ : Union[str, Any] , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. 
include the space before it __lowerCAmelCase = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) __lowerCAmelCase = vocab_file __lowerCAmelCase = False if not self.vocab_file else True def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] __lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
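The token-building methods above implement the BART-style layout `<s> A </s></s> B </s>` with all-zero token type ids; a pure-Python sketch with toy ids:

# Toy ids: 0 = <s> (cls), 2 = </s> (sep); A and B are already-tokenized sequences.
cls, sep = [0], [2]
token_ids_a = [11, 12, 13]
token_ids_b = [21, 22]

single = cls + token_ids_a + sep
pair = cls + token_ids_a + sep + sep + token_ids_b + sep

assert single == [0, 11, 12, 13, 2]
assert pair == [0, 11, 12, 13, 2, 2, 21, 22, 2]
# Token type ids are all zeros for both segments, as in the second method above.
assert len(pair) * [0] == [0] * 9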
53
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = '''▁''' __UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } __UpperCAmelCase = { '''facebook/xglm-564M''': 2_048, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None: UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer UpperCamelCase : Any = 7 UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase : int = 1 # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} UpperCamelCase : Optional[int] = len(self.sp_model ) UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: UpperCamelCase : int = self.__dict__.copy() UpperCamelCase : Union[str, Any] = None UpperCamelCase : int = self.sp_model.serialized_model_proto() return state def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str: UpperCamelCase : Any = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): UpperCamelCase : Any = {} UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a UpperCamelCase : Optional[int] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : str = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def snake_case_ ( self ) -> int: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def snake_case_ ( self ) -> int: UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip() return out_string def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) 
return UpperCamelCase : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi: UpperCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
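The fairseq/sentencepiece alignment above boils down to four pinned control ids plus a constant offset; a toy sketch of the id mapping (toy_spm stands in for the real SentencePiece vocab):

# The first four fairseq ids are fixed control tokens; every real piece is
# shifted up by fairseq_offset = 1, mirroring the token-to-id conversion above.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id[token]
    # spm id 0 is <unk> in the sentencepiece model, so 0 maps back to <unk>.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


toy_spm = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
assert token_to_id(",", toy_spm) == 4   # 3 + offset, matching the alignment table above
assert token_to_id("<pad>", toy_spm) == 1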
40
0
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class A : @staticmethod def lowerCAmelCase__ ( *_lowerCAmelCase: Tuple , **_lowerCAmelCase: Union[str, Any] ) -> Optional[Any]: '''simple docstring''' pass def a__ ( lowercase__ ): '''simple docstring''' UpperCAmelCase_ =hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def a__ ( lowercase__ ): '''simple docstring''' UpperCAmelCase_ =np.array(lowercase__ ) UpperCAmelCase_ =npimg.shape return {"hash": hashimage(lowercase__ ), "shape": shape} @is_pipeline_test @require_vision @require_torch class A ( unittest.TestCase ): _snake_case =dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) _snake_case =dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: int , _lowerCAmelCase: str ) -> Any: '''simple docstring''' UpperCAmelCase_ =MaskGenerationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[Any] ) -> Dict: '''simple docstring''' pass @require_tf @unittest.skip("Image segmentation not implemented in TF" ) def lowerCAmelCase__ ( self: str ) -> int: '''simple docstring''' pass @slow @require_torch def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ =pipeline("mask-generation" , model="facebook/sam-vit-huge" ) UpperCAmelCase_ =image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase_ =[] for i, o in enumerate(outputs["masks"] ): new_outupt += [{"mask": mask_to_test_readable(_lowerCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(_lowerCAmelCase , decimals=4 ) , [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53}, {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67}, {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93}, {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09}, {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79}, {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34}, {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16}, {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12}, {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99}, {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52}, {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32}, 
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16}, {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99}, {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83}, {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64}, {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43}, {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43}, {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08}, {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35}, {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26}, {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62}, {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99}, {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86}, {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84}, {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73}, {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71} ] , ) # fmt: on @require_torch @slow def lowerCAmelCase__ ( self: str ) -> Any: '''simple docstring''' UpperCAmelCase_ ="facebook/sam-vit-huge" UpperCAmelCase_ =pipeline("mask-generation" , model=_lowerCAmelCase ) UpperCAmelCase_ =image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase_ =[] for i, o in enumerate(outputs["masks"] ): new_outupt += [{"mask": mask_to_test_readable(_lowerCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(_lowerCAmelCase , decimals=4 ) , [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53}, ] , )
54
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCAmelCase = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } __UpperCAmelCase = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : str = ["input_ids", "attention_mask"] UpperCAmelCase__ : Dict = RobertaTokenizer def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]: super().__init__( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, 
errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) ) UpperCamelCase : List[str] = add_prefix_space UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = add_prefix_space UpperCamelCase : Optional[Any] = 'post_processor' UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase : Optional[Any] = tuple(state['sep'] ) if "cls" in state: UpperCamelCase : Optional[int] = tuple(state['cls'] ) UpperCamelCase : Any = False if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Optional[int] = add_prefix_space UpperCamelCase : List[Any] = True if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets: UpperCamelCase : Dict = trim_offsets UpperCamelCase : Union[str, Any] = True if changes_to_apply: UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) ) UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) @property def snake_case_ ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value UpperCamelCase : List[Any] = value def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple: UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : Dict = [self.sep_token_id] UpperCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
40
0
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the reversible numbers of the given length, filling in digits
    pairwise while tracking the parity of the running remainder.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            # place the middle digit
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        # fix a digit near the right end of the still-open window...
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            # ...and pair it with an opposite-parity digit at the mirror position
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers there are below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
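# Added note (not part of the original solution file): a doctest-style sanity
# check against the Project Euler 145 statement, which says there are exactly
# 120 reversible numbers below one thousand.
# >>> solution(3)
# 120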
55
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple: super().__init__(features=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = torch_tensor_kwargs import torch # noqa import torch at initialization def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict: import torch if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column: if all( isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any: import torch if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ): return value elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ): return value.tolist() UpperCamelCase : str = {} if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ): UpperCamelCase : List[str] = {'dtype': torch.intaa} elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ): UpperCamelCase : int = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ): UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ ) return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: import torch # support for torch, tf, jax etc. 
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ): UpperCamelCase : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] ) elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ): return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] ) return self._tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int: return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ ) return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor": UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] ) UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for column_name in batch: UpperCamelCase : str = self._consolidate(batch[column_name] ) return batch
40
0
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Find the numerator of the fraction immediately to the left of
    numerator/denominator when all fractions with denominators up to
    ``limit`` are listed in ascending order (Project Euler 71).
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # keep the candidate if it is closer to numerator/denominator from below
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator

    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
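# Added illustration (not in the original file): with the small bound from the
# Project Euler 71 statement, the fraction immediately to the left of 3/7 among
# denominators up to 8 is 2/5, so the function returns its numerator.
# >>> solution(3, 7, 8)
# 2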
56
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculate the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """
    For each vector in value_array, find the nearest vector in dataset
    (by Euclidean distance) and return [nearest_vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            # keep the closer of the two candidates
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculate the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
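# Added usage sketch (not part of the original module; the values below were
# worked out by hand): each query row is matched to its nearest dataset vector
# by Euclidean distance, here sqrt(0.1**2 + 0.1**2) ~= 0.1414.
# >>> dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
# >>> value_array = np.array([[0.9, 1.1]])
# >>> similarity_search(dataset, value_array)
# [[[1.0, 1.0], 0.1414...]]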
40
0
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=4 , ): UpperCamelCase_: int = parent UpperCamelCase_: List[str] = batch_size UpperCamelCase_: List[str] = seq_length UpperCamelCase_: Union[str, Any] = is_training UpperCamelCase_: Optional[int] = use_attention_mask UpperCamelCase_: List[Any] = use_token_type_ids UpperCamelCase_: Optional[Any] = use_labels UpperCamelCase_: List[str] = vocab_size UpperCamelCase_: Optional[int] = hidden_size UpperCamelCase_: str = num_hidden_layers UpperCamelCase_: str = num_attention_heads UpperCamelCase_: List[Any] = intermediate_size UpperCamelCase_: Optional[Any] = hidden_act UpperCamelCase_: int = hidden_dropout_prob UpperCamelCase_: Any = attention_probs_dropout_prob UpperCamelCase_: List[Any] = max_position_embeddings UpperCamelCase_: int = type_vocab_size UpperCamelCase_: str = type_sequence_label_size UpperCamelCase_: Union[str, Any] = initializer_range UpperCamelCase_: Optional[Any] = num_choices def _a ( self ): UpperCamelCase_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_: List[str] = None if self.use_attention_mask: UpperCamelCase_: Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase_: List[Any] = None if self.use_token_type_ids: UpperCamelCase_: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase_: List[str] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self ): UpperCamelCase_: Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: List[str] = config_and_inputs UpperCamelCase_: Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" a : Dict =( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, 
FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self ): UpperCamelCase_: int = FlaxAlbertModelTester(self ) @slow def _a ( self ): for model_class_name in self.all_model_classes: UpperCamelCase_: int = model_class_name.from_pretrained('albert-base-v2' ) UpperCamelCase_: Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" @slow def _a ( self ): UpperCamelCase_: Any = FlaxAlbertModel.from_pretrained('albert-base-v2' ) UpperCamelCase_: Union[str, Any] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCamelCase_: int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCamelCase_: str = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0] UpperCamelCase_: Union[str, Any] = (1, 1_1, 7_6_8) self.assertEqual(output.shape , _lowerCamelCase ) UpperCamelCase_: Any = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
57
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
40
0
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ): '''simple docstring''' snake_case_ : List[Any] = FunnelConfig.from_json_file(__UpperCamelCase ) print(F'Building PyTorch model from configuration: {config}' ) snake_case_ : Dict = FunnelBaseModel(__UpperCamelCase ) if base_model else FunnelModel(__UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_funnel(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , __UpperCamelCase ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.''' ) __lowerCAmelCase : Dict = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
58
import os import pytest from attr import dataclass __UpperCAmelCase = '''us-east-1''' # defaults region @dataclass class lowerCAmelCase_ : UpperCAmelCase__ : str UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role" UpperCAmelCase__ : Union[str, Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000} @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def snake_case_ ( self ) -> str: return F"""{self.framework}-transfromers-test""" @property def snake_case_ ( self ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]: UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
40
0
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
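# Added usage sketch (not part of the original module): spreading 8 layers over
# 3 devices gives chunks of ceil(8 / 3) = 3 layers each, with the last device
# taking the remainder.
# >>> get_device_map(8, range(3))
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]}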
59
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = '''src/transformers''' __UpperCAmelCase = '''docs/source/en/tasks''' def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]: with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: UpperCamelCase : Optional[Any] = f.readlines() # Find the start prompt. UpperCamelCase : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 UpperCamelCase : Optional[Any] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__UpperCAmelCase = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]: UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide] UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) UpperCamelCase : Tuple = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , ) UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ' to fix this.' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
40
0
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
60
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : int = IFPipeline UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} def snake_case_ ( self ) -> str: return self._get_dummy_components() def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case_ ( self ) -> Optional[int]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' ) def snake_case_ ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def snake_case_ ( self ) -> Dict: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def snake_case_ ( self ) -> Optional[int]: self._test_save_load_local() def snake_case_ ( self ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def snake_case_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self ) -> List[Any]: # if UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa ) UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCamelCase : int = None UpperCamelCase : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components ) UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components ) UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 UpperCamelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Tuple = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) 
UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : List[Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) 
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
40
0
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging UpperCamelCase = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int = 101 ) -> List[str]: lowerCAmelCase__ = length def __len__( self : Tuple ) -> Union[str, Any]: return self.length def __getitem__( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> int: return i class __lowerCamelCase : """simple docstring""" def __call__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> int: return {"input_ids": torch.tensor(SCREAMING_SNAKE_CASE__ ), "labels": torch.tensor(SCREAMING_SNAKE_CASE__ )} class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] ) -> Any: super().__init__() # Add some (unused) params otherwise DDP will complain. lowerCAmelCase__ = nn.Linear(120 , 80 ) def a ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> Optional[Any]: if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" @require_torch_neuroncore def a ( self : Tuple ) -> Optional[Any]: lowerCAmelCase__ = f'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = f'--output_dir {output_dir}'.split() lowerCAmelCase__ = ["torchrun"] + distributed_args + args execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" @require_torch_multi_gpu def a ( self : Dict ) -> List[str]: lowerCAmelCase__ = f'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = f'--output_dir {output_dir}'.split() lowerCAmelCase__ = ["torchrun"] + distributed_args + args execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py UpperCamelCase = HfArgumentParser((TrainingArguments,)) UpperCamelCase = parser.parse_args_into_dataclasses()[0] logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
    # (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
61
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def UpperCamelCase ( snake_case__ : Tuple="" ) -> str: UpperCamelCase : Union[str, Any] = tempfile.mkdtemp() return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> int: UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) # Ensure that the file contains the same value as the original tensor UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) ) def snake_case_ ( self ) -> Any: UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' ) sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 ) UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ ) @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) ) UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw(), Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type 
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
40
0
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Union[str, Any]=6 , UpperCAmelCase_ : Optional[Any]=17 , UpperCAmelCase_ : Optional[Any]=23 , UpperCAmelCase_ : Optional[int]=11 , UpperCAmelCase_ : Any=True , ): SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : Tuple = seq_length SCREAMING_SNAKE_CASE : List[str] = act_dim SCREAMING_SNAKE_CASE : Optional[int] = state_dim SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = max_length SCREAMING_SNAKE_CASE : Optional[int] = is_training def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) SCREAMING_SNAKE_CASE : int = floats_tensor((self.batch_size, self.seq_length, 1) ) SCREAMING_SNAKE_CASE : Any = floats_tensor((self.batch_size, self.seq_length, 1) ) SCREAMING_SNAKE_CASE : List[str] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) SCREAMING_SNAKE_CASE : str = random_attention_mask((self.batch_size, self.seq_length) ) SCREAMING_SNAKE_CASE : str = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def _A ( self : List[Any] ): return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , ): SCREAMING_SNAKE_CASE : Dict = DecisionTransformerModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Union[str, Any] = 
config_and_inputs SCREAMING_SNAKE_CASE : Any = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = (DecisionTransformerModel,) if is_torch_available() else () UpperCamelCase_ : Dict = () UpperCamelCase_ : Optional[int] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids UpperCamelCase_ : str = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features UpperCamelCase_ : Tuple = False UpperCamelCase_ : str = False UpperCamelCase_ : Union[str, Any] = False UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Tuple = False UpperCamelCase_ : Union[str, Any] = False UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Dict = False UpperCamelCase_ : Optional[Any] = False def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : str = DecisionTransformerModelTester(self ) SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def _A ( self : Optional[int] ): self.config_tester.run_common_tests() def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) @slow def _A ( self : str ): for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = DecisionTransformerModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def _A ( self : str ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Union[str, Any] = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(UpperCAmelCase_ )] , UpperCAmelCase_ ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Optional[int] = 2 # number of steps of autoregressive prediction we will perform SCREAMING_SNAKE_CASE : Tuple = 10 # defined by the RL environment, may be normalized SCREAMING_SNAKE_CASE : int = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) SCREAMING_SNAKE_CASE : int = model.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = model.config torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase_ , dtype=torch.floataa ) # env.reset() SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) SCREAMING_SNAKE_CASE : Tuple = state SCREAMING_SNAKE_CASE : str = 
torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase_ , dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(1 , 0 , device=UpperCAmelCase_ , dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Any = torch.tensor(0 , device=UpperCAmelCase_ , dtype=torch.long ).reshape(1 , 1 ) for step in range(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : str = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase_ )] , dim=1 ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase_ )] , dim=1 ) SCREAMING_SNAKE_CASE : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = model( states=UpperCAmelCase_ , actions=UpperCAmelCase_ , rewards=UpperCAmelCase_ , returns_to_go=UpperCAmelCase_ , timesteps=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase_ , dtype=torch.floataa ), 1.0, False, {}, ) SCREAMING_SNAKE_CASE : List[Any] = action_pred[0, -1] SCREAMING_SNAKE_CASE : str = torch.cat([states, state] , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = returns_to_go[0, -1] - reward SCREAMING_SNAKE_CASE : Dict = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) SCREAMING_SNAKE_CASE : Optional[int] = torch.cat( [timesteps, torch.ones((1, 1) , device=UpperCAmelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
62
def kth_permutation(k: int, n: int) -> list:
    """
    Find the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1
    by decoding k in the factorial number system (Lehmer code).
    """
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Peel off one factorial digit of k per output position.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
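A quick usage sketch for the factorial-number-system decoding above; the name `kth_permutation` follows the cleaned-up definition, and the expected outputs were checked by enumerating the permutations of range(3) in lexicographic order.

# Permutations of [0, 1, 2] in lexicographic order:
# 0: [0, 1, 2], 1: [0, 2, 1], 2: [1, 0, 2], 3: [1, 2, 0], 4: [2, 0, 1], 5: [2, 1, 0]
print(kth_permutation(0, 3))  # [0, 1, 2]
print(kth_permutation(3, 3))  # [1, 2, 0]
print(kth_permutation(5, 3))  # [2, 1, 0]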
40
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase__ ( __lowerCamelCase : Tuple ): __UpperCAmelCase : str = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2] __UpperCAmelCase : Any = True if """large""" in model_name or """huge""" in model_name else False __UpperCAmelCase : int = True if """large""" in model_name or """huge""" in model_name else False __UpperCAmelCase : Optional[int] = True if """large""" in model_name or """huge""" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 3, 3] __UpperCAmelCase : Union[str, Any] = [5, 5, 5, 5] elif "fl4" in model_name: __UpperCAmelCase : str = [4, 4, 4, 4] __UpperCAmelCase : Optional[Any] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __UpperCAmelCase : Dict = [3, 3, 3, 3] if "lrf" in model_name: __UpperCAmelCase : Optional[Any] = [3, 3, 3, 3] else: __UpperCAmelCase : Optional[int] = [2, 2, 2, 2] if "tiny" in model_name: __UpperCAmelCase : List[str] = 96 elif "small" in model_name: __UpperCAmelCase : Dict = 96 elif "base" in model_name: __UpperCAmelCase : List[Any] = 128 elif "large" in model_name: __UpperCAmelCase : Any = 192 elif "xlarge" in model_name: __UpperCAmelCase : Tuple = 256 elif "huge" in model_name: __UpperCAmelCase : int = 352 # set label information __UpperCAmelCase : Tuple = """huggingface/label-files""" if "large" in model_name or "huge" in model_name: __UpperCAmelCase : Any = """imagenet-22k-id2label.json""" else: __UpperCAmelCase : Dict = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : Optional[int] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : List[str] = FocalNetConfig( embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , ) return config def lowerCamelCase__ ( __lowerCamelCase : Tuple ): if "patch_embed.proj" in name: __UpperCAmelCase : List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __UpperCAmelCase : Dict = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: __UpperCAmelCase : int = """encoder.""" + name if "encoder.layers" in name: __UpperCAmelCase : Optional[int] = name.replace("""encoder.layers""" , """encoder.stages""" ) if "downsample.proj" in name: __UpperCAmelCase : Optional[Any] = name.replace("""downsample.proj""" , """downsample.projection""" ) if "blocks" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""blocks""" , """layers""" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __UpperCAmelCase : List[str] = name.replace("""modulation.f""" , """modulation.projection_in""" ) if "modulation.h.weight" in name or "modulation.h.bias" in 
name: __UpperCAmelCase : List[Any] = name.replace("""modulation.h""" , """modulation.projection_context""" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __UpperCAmelCase : str = name.replace("""modulation.proj""" , """modulation.projection_out""" ) if name == "norm.weight": __UpperCAmelCase : Optional[Any] = """layernorm.weight""" if name == "norm.bias": __UpperCAmelCase : Dict = """layernorm.bias""" if "head" in name: __UpperCAmelCase : Tuple = name.replace("""head""" , """classifier""" ) else: __UpperCAmelCase : int = """focalnet.""" + name return name def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=False ): # fmt: off __UpperCAmelCase : Dict = { """focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""", """focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""", """focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""", """focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""", """focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""", """focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""", """focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""", """focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""", """focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""", """focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""", } # fmt: on __UpperCAmelCase : int = model_name_to_url[model_name] print("""Checkpoint URL: """ , __lowerCamelCase ) __UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""model"""] # rename keys for key in state_dict.copy().keys(): __UpperCAmelCase : Tuple = state_dict.pop(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = val __UpperCAmelCase : Optional[Any] = get_focalnet_config(__lowerCamelCase ) __UpperCAmelCase : Any = FocalNetForImageClassification(__lowerCamelCase ) model.eval() # load state dict model.load_state_dict(__lowerCamelCase ) # verify conversion __UpperCAmelCase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Union[str, Any] = BitImageProcessor( do_resize=__lowerCamelCase , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=224 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , ) __UpperCAmelCase : Optional[Any] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) __UpperCAmelCase : List[Any] = processor(images=__lowerCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : str = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) __UpperCAmelCase : 
List[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 ) __UpperCAmelCase : Dict = model(**__lowerCamelCase ) __UpperCAmelCase : Any = outputs.logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) print("""First values of logits:""" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __UpperCAmelCase : Union[str, Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": __UpperCAmelCase : Dict = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": __UpperCAmelCase : int = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": __UpperCAmelCase : int = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": __UpperCAmelCase : Optional[int] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": __UpperCAmelCase : Optional[Any] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print(f"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(f"""{model_name}""" ) processor.push_to_hub(f"""{model_name}""" ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="focalnet-tiny", type=str, help="Name of the FocalNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub.", ) a : Any = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
63
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase_ ( a__ ): def snake_case_ ( self ) -> Tuple: UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) ) class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any: UpperCamelCase : int = parent UpperCamelCase : int = batch_size UpperCamelCase : List[Any] = image_size UpperCamelCase : List[str] = patch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 ) UpperCamelCase : List[str] = hidden_act UpperCamelCase : Optional[int] = conv_kernel_size UpperCamelCase : List[str] = output_stride UpperCamelCase : Union[str, Any] = classifier_dropout_prob UpperCamelCase : List[Any] = use_labels UpperCamelCase : Any = is_training UpperCamelCase : int = num_labels UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Tuple = scope UpperCamelCase : List[str] = width_multiplier UpperCamelCase : Any = ffn_dropout UpperCamelCase : List[Any] = attn_dropout def snake_case_ ( self ) -> int: UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[str] = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels ) UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) UpperCamelCase : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def snake_case_ ( self ) -> int: return MobileViTVaConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Optional[int] = self.num_labels UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase : Any = self.num_labels UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : Tuple = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase__ : Any = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Dict = MobileViTVaModelTester(self ) UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def snake_case_ ( self ) -> int: pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def snake_case_ ( self ) -> str: pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def snake_case_ ( self ) -> Dict: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> List[str]: UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : str = [*signature.parameters.keys()] UpperCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Tuple: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Tuple = outputs.hidden_states UpperCamelCase : Dict = 5 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCamelCase : Any = 2 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : Optional[int] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> str: UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) @slow def snake_case_ ( self ) -> Optional[Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Tuple: UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def snake_case_ ( self ) -> str: return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : 
Any = prepare_img() UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Union[str, Any] = prepare_img() UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ], device=SCREAMING_SNAKE_CASE_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) ) @slow def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase : Tuple = prepare_img() UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = outputs.logits.detach().cpu() UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] ) UpperCamelCase : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
40
0
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: one pass bubbles the largest of the first
    `length` items into place, then the shorter prefix is sorted
    recursively; stop early as soon as a pass makes no swaps.
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
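A small hedged usage sketch for the recursive bubble sort above (the sample data is illustrative, not from the original file):

data = [5, 1, 4, 2, 8]
print(bubble_sort(data))         # [1, 2, 4, 5, 8]
print(bubble_sort([]))           # []  (length defaults to len(list_data) == 0)
print(bubble_sort([-3, 0, -3]))  # [-3, -3, 0]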
64
def longest_distance(graph: dict) -> None:
    """
    Longest path (counted in vertices) in a DAG, via Kahn's topological
    ordering: each edge is relaxed exactly once.
    """
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # every vertex alone is a path of length 1

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
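As a hand-checked note on the routine above: for the sample graph the longest chain is 0 -> 2 -> 5 -> 6 -> 7, which touches 5 vertices, so the call prints 5. A minimal extra check on a trivial chain:

longest_distance({0: [1], 1: [2], 2: []})  # prints 3 (a chain of three vertices)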
40
0
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __lowercase : def __init__( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = """""" UpperCAmelCase__ : Optional[Any] = """""" UpperCAmelCase__ : Dict = [] UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : int = 256 UpperCAmelCase__ : Any = 0 UpperCAmelCase__ : str = 0 UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : Dict = 0 def __lowercase ( self : Optional[Any] ,A : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = cva.imread(A ,0 ) UpperCAmelCase__ : Union[str, Any] = copy.deepcopy(self.img ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = plt.hist(self.img.ravel() ,256 ,[0, 256] ,label="""x""" ) UpperCAmelCase__ : Dict = np.sum(A ) for i in range(len(A ) ): UpperCAmelCase__ : Optional[int] = x[i] / self.k self.sk += prk UpperCAmelCase__ : str = (self.L - 1) * self.sk if self.rem != 0: UpperCAmelCase__ : int = int(last % last ) UpperCAmelCase__ : Optional[int] = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(A ) UpperCAmelCase__ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size ) UpperCAmelCase__ : List[Any] = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): UpperCAmelCase__ : int = self.img[j][i] if num != self.last_list[num]: UpperCAmelCase__ : Optional[Any] = self.last_list[num] cva.imwrite("""output_data/output.jpg""" ,self.img ) def __lowercase ( self : List[Any] ): '''simple docstring''' plt.hist(self.img.ravel() ,256 ,[0, 256] ) def __lowercase ( self : str ): '''simple docstring''' cva.imshow("""Output-Image""" ,self.img ) cva.imshow("""Input-Image""" ,self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": __UpperCAmelCase = os.path.join(os.path.basename(__file__), 'image_data/input.jpg') __UpperCAmelCase = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
65
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
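The `_LazyModule` registration above defers heavy submodule imports until an attribute is first touched. A stripped-down sketch of the same idea using a PEP 562 module-level `__getattr__` (an illustration of the mechanism, not the transformers implementation):

# lazy_pkg/__init__.py - minimal lazy-import sketch (PEP 562)
import importlib

_import_structure = {"configuration_mra": ["MraConfig"]}  # submodule -> exported names
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the owning submodule only the first time one of its names is requested.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")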
40
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) UpperCamelCase = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["ViTFeatureExtractor"] UpperCamelCase = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]


if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into one processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        # Merge audio features into the text encoding when both modalities are given.
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
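A short usage sketch for the processor above; the checkpoint name and the 48 kHz sampling rate are assumptions for illustration:

# Hypothetical usage; "laion/clap-htsat-unfused" is an assumed checkpoint name.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000).astype("float32")  # one second of fake audio
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
# Per __call__ above, `inputs` carries both the tokenizer fields
# (input_ids, attention_mask) and the feature extractor's input_features.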
67
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
0
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _A ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Dict = StableUnCLIPImgaImgPipeline lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS lowerCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCamelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCamelCase : List[Any] = frozenset([] ) def _a ( self : Any ) -> Optional[int]: __UpperCAmelCase =32 __UpperCAmelCase =embedder_hidden_size # image encoding components __UpperCAmelCase =CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __UpperCAmelCase =CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=__SCREAMING_SNAKE_CASE , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __UpperCAmelCase =StableUnCLIPImageNormalizer(embedding_dim=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) __UpperCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) __UpperCAmelCase =CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __UpperCAmelCase =UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=__SCREAMING_SNAKE_CASE , use_linear_projection=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __UpperCAmelCase =DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , ) torch.manual_seed(0 ) __UpperCAmelCase =AutoencoderKL() __UpperCAmelCase ={ # image encoding components """feature_extractor""": feature_extractor, """image_encoder""": image_encoder.eval(), # 
image noising components """image_normalizer""": image_normalizer.eval(), """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder.eval(), """unet""": unet.eval(), """scheduler""": scheduler, """vae""": vae.eval(), } return components def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=True ) -> Dict: if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __UpperCAmelCase =torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) if pil_image: __UpperCAmelCase =input_image * 0.5 + 0.5 __UpperCAmelCase =input_image.clamp(0 , 1 ) __UpperCAmelCase =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __UpperCAmelCase =DiffusionPipeline.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def _a ( self : Tuple ) -> str: __UpperCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase =self.get_dummy_components() __UpperCAmelCase =StableUnCLIPImgaImgPipeline(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) inputs.update({"""image_embeds""": None} ) __UpperCAmelCase =sd_pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : int ) -> str: __UpperCAmelCase =torch_device in ["""cpu""", """mps"""] self._test_attention_slicing_forward_pass(test_max_difference=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Any: __UpperCAmelCase =torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__SCREAMING_SNAKE_CASE ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _a ( self : List[Any] ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__SCREAMING_SNAKE_CASE ) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : str ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Tuple ) -> List[str]: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) __UpperCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" ) __UpperCAmelCase =StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) # stable unclip will oom when integration tests are run on a V100, 
# so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 ) __UpperCAmelCase =pipe(__SCREAMING_SNAKE_CASE , """anime turle""" , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" ) __UpperCAmelCase =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) __UpperCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" ) __UpperCAmelCase =StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 ) __UpperCAmelCase =pipe(__SCREAMING_SNAKE_CASE , """anime turle""" , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" ) __UpperCAmelCase =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase =StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) __UpperCAmelCase =pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase =pipe( __SCREAMING_SNAKE_CASE , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , ) __UpperCAmelCase =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
68
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''ViTFeatureExtractor'''] __UpperCAmelCase = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
40
0
def prefix_function(input_string: str) -> list:
    """
    For each prefix of input_string, compute the length of the longest
    proper prefix that is also a suffix (the KMP failure function).
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
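A worked example for the failure function above; the values were traced by hand:

# At the last position of "aabcdaabc", the longest proper border is "aabc" (length 4).
print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(longest_prefix("aabcdaabc"))   # 4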
69
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __UpperCAmelCase = random.Random() def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any: if rng is None: UpperCamelCase : int = global_rng UpperCamelCase : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]: UpperCamelCase : Dict = parent UpperCamelCase : Dict = batch_size UpperCamelCase : Any = min_seq_length UpperCamelCase : Optional[int] = max_seq_length UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Tuple = feature_size UpperCamelCase : Any = padding_value UpperCamelCase : Tuple = sampling_rate UpperCamelCase : Optional[Any] = return_attention_mask UpperCamelCase : Optional[Any] = do_normalize def snake_case_ ( self ) -> Union[str, Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]: def _flatten(SCREAMING_SNAKE_CASE_ ): return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Union[str, Any] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] return speech_inputs class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Any = WavaVecaFeatureExtractor def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) ) def snake_case_ ( self ) -> Optional[int]: # Tests that all call wrap to encode_plus and batch_encode_plus UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, 
SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test batched UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : Any = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Tuple: UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Tuple = range(800, 1400, 200 ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths] UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad'] UpperCamelCase : List[str] = [None, 1600, None] for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : int = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' ) UpperCamelCase : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 
200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' ) UpperCamelCase : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase : Any = feat_extract( SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' ) UpperCamelCase : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def snake_case_ ( self ) -> str: import torch UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : Dict = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def snake_case_ ( self ) -> Tuple: # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
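The `_check_zero_mean_unit_variance` assertions in the tests above verify the feature extractor's per-utterance normalization. A hedged sketch of that normalization (the epsilon is an assumption; a small constant is used for numerical stability):

import numpy as np


def zero_mean_unit_var_norm(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    """Normalize a 1-D waveform to zero mean and unit variance."""
    return (x - x.mean()) / np.sqrt(x.var() + eps)


wav = np.random.randn(16_000).astype(np.float32) * 3.0 + 1.5
norm = zero_mean_unit_var_norm(wav)
assert abs(norm.mean()) < 1e-3 and abs(norm.var() - 1) < 1e-3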
40
0
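The assertions above repeatedly check that each padded batch is zero-mean/unit-variance over the real samples and silent in the padded tail. A standalone numpy sketch of that invariant (the function name and the 1e-7 epsilon are illustrative, not the transformers API):

import numpy as np

def zero_mean_unit_var_norm(padded, lengths):
    # normalize each row over its real samples; leave the padded tail at zero
    out = np.zeros_like(padded)
    for i, length in enumerate(lengths):
        real = padded[i, :length]
        out[i, :length] = (real - real.mean()) / np.sqrt(real.var() + 1e-7)
    return out

raw = [np.random.rand(800).astype(np.float32), np.random.rand(1000).astype(np.float32)]
padded = np.zeros((2, 1000), dtype=np.float32)
for i, v in enumerate(raw):
    padded[i, : len(v)] = v

norm = zero_mean_unit_var_norm(padded, [800, 1000])
assert abs(norm[0, :800].mean()) < 1e-3 and abs(norm[0, :800].var() - 1) < 1e-3
assert norm[0, 800:].sum() == 0.0   # the padded region stays silent, as the tests assert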
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} lowerCamelCase : Tuple = { "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } lowerCamelCase : Any = { "abeja/gpt-neox-japanese-2.7b": 2_048, } def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Any ): '''simple docstring''' with open(lowercase , 'r' , encoding='utf-8' ) as f: lowerCamelCase_ = json.loads(f.read() ) lowerCamelCase_ = collections.OrderedDict() lowerCamelCase_ = collections.OrderedDict() lowerCamelCase_ = collections.OrderedDict() with open(lowercase , 'r' , encoding='utf-8' ) as f: lowerCamelCase_ = f.readlines() lowerCamelCase_ = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(lowercase ): lowerCamelCase_ = b lowerCamelCase_ = idx for wd in b: lowerCamelCase_ = idx return vocab, raw_vocab, ids_to_tokens, emoji class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : str , A_ : Any , A_ : Any , A_ : Optional[Any]="<|endoftext|>" , A_ : Any="<|endoftext|>" , A_ : Optional[int]="<|startoftext|>" , A_ : Union[str, Any]="<|endoftext|>" , A_ : Any=False , **A_ : Tuple , ) -> Dict: """simple docstring""" super().__init__( unk_token=A_ , pad_token=A_ , bos_token=A_ , eos_token=A_ , do_clean_text=A_ , **A_ , ) if not os.path.isfile(A_ ): raise ValueError( f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(A_ ): raise ValueError( f"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) lowerCamelCase_ = do_clean_text lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = load_vocab_and_emoji(A_ , A_ ) lowerCamelCase_ = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def a__ ( self : List[str] ) -> Dict: """simple docstring""" return len(self.raw_vocab ) def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder ) def a__ ( self : Optional[Any] , A_ : str ) -> Tuple: """simple docstring""" return self.subword_tokenizer.tokenize(A_ , clean=self.do_clean_text ) def a__ ( self : Optional[int] , A_ : Dict ) -> List[Any]: """simple docstring""" return self.vocab.get(A_ , self.vocab.get(self.unk_token ) ) def a__ ( self : Union[str, Any] , A_ : Union[str, Any] ) -> int: """simple docstring""" return self.subword_tokenizer.convert_id_to_token(A_ ) def a__ ( self : Optional[int] , A_ : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = ''.join(A_ ).strip() return out_string def a__ ( self : Optional[Any] , A_ : "Conversation" ) -> List[int]: """simple docstring""" lowerCamelCase_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] ) if len(A_ ) > self.model_max_length: lowerCamelCase_ = input_ids[-self.model_max_length :] return input_ids def a__ ( self : List[Any] , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" lowerCamelCase_ = 0 if os.path.isdir(A_ ): lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: lowerCamelCase_ = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' 
) lowerCamelCase_ = token_index writer.write(','.join(A_ ) + '\n' ) index += 1 with open(A_ , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , A_ ) return vocab_file, emoji_file class A( UpperCamelCase ): '''simple docstring''' def __init__( self : Any , A_ : Union[str, Any] , A_ : int , A_ : Tuple ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = vocab # same as swe lowerCamelCase_ = ids_to_tokens # same as bpe lowerCamelCase_ = emoji lowerCamelCase_ = np.max([len(A_ ) for w in self.vocab.keys()] ) lowerCamelCase_ = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) lowerCamelCase_ = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) lowerCamelCase_ = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) lowerCamelCase_ = re.compile( r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCamelCase_ = re.compile( r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCamelCase_ = re.compile( r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) lowerCamelCase_ = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' lowerCamelCase_ = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' lowerCamelCase_ = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self : str ) -> Optional[int]: """simple docstring""" return len(self.ids_to_tokens ) def a__ ( self : Union[str, Any] , A_ : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.content_repattera.sub('<URL>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<EMAIL>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<TEL>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<PRICE>' , A_ ) lowerCamelCase_ = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: lowerCamelCase_ = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def a__ ( self : int , A_ : Optional[Any] , A_ : Tuple=False ) -> Dict: """simple docstring""" lowerCamelCase_ = text.replace(' ' , '<SP>' ) lowerCamelCase_ = text.replace(' ' , '<SP>' ) lowerCamelCase_ = text.replace('\r\n' , '<BR>' ) lowerCamelCase_ = text.replace('\n' , '<BR>' ) lowerCamelCase_ = text.replace('\r' , '<BR>' ) lowerCamelCase_ = text.replace('\t' , '<TAB>' ) lowerCamelCase_ = text.replace('—' , 'ー' ) lowerCamelCase_ = text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: lowerCamelCase_ = text.replace(A_ , A_ ) if clean: lowerCamelCase_ = self.clean_text(A_ ) def check_simbol(A_ : Union[str, Any] ): lowerCamelCase_ = x.encode() if len(A_ ) == 1 and len(A_ ) == 2: lowerCamelCase_ = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0XC2_A1 and c <= 0XC2_BF) or (c >= 0XC7_80 and c <= 0XC7_83) or (c >= 0XCA_B9 and c <= 0XCB_BF) or (c >= 0XCC_80 and c <= 0XCD_A2) ): return True return False def checkuae(A_ : Tuple ): lowerCamelCase_ = x.encode() if len(A_ ) == 1 and len(A_ ) == 3: lowerCamelCase_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if 
c >= 0XE2_80_80 and c <= 0XE2_B0_7F: return True return False lowerCamelCase_ = 0 lowerCamelCase_ = [] while pos < len(A_ ): lowerCamelCase_ = min(len(A_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 lowerCamelCase_ = [] # (token_id, token, pos) for e in range(A_ , A_ , -1 ): lowerCamelCase_ = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(A_ ) > 2: lowerCamelCase_ = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(A_ ) > 0: # the smallest token_id is adopted lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = sorted(A_ , key=lambda A_ : x[0] )[0] result.append(A_ ) lowerCamelCase_ = e else: lowerCamelCase_ = pos + 1 lowerCamelCase_ = text[pos:end] if check_simbol(A_ ): result.append('<KIGOU>' ) elif checkuae(A_ ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) lowerCamelCase_ = end return result def a__ ( self : List[Any] , A_ : Tuple , A_ : List[str]="\n" ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] lowerCamelCase_ = [] lowerCamelCase_ = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(A_ ) > 0: words.append(bytearray(A_ ).decode('utf-8' , errors='replace' ) ) lowerCamelCase_ = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(A_ ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(A_ ) if len(A_ ) > 0: words.append(bytearray(A_ ).decode('utf-8' , errors='replace' ) ) lowerCamelCase_ = ''.join(A_ ) return text
70
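The tokenize loop above is a longest-match-first vocabulary scan with a UTF-8 byte fallback for unknown characters. A simplified sketch of that loop with a toy vocabulary (the real code additionally prefers the smallest token id among the candidate matches):

def greedy_tokenize(text, vocab, maxlen):
    # longest-match-first tokenization with UTF-8 byte fallback
    pos, result = 0, []
    while pos < len(text):
        end = min(len(text), pos + maxlen)
        match = None
        for e in range(end, pos, -1):          # try the longest span first
            if text[pos:e] in vocab:
                match = text[pos:e]
                break
        if match is not None:
            result.append(match)
            pos += len(match)
        else:                                   # unknown character -> byte tokens
            for b in text[pos].encode("utf-8"):
                result.append("<|byte%d|>" % b)
            pos += 1
    return result

vocab = {"こん", "にちは", "世界"}
print(greedy_tokenize("こんにちは!", vocab, maxlen=3))
# ['こん', 'にちは', '<|byte33|>']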
def UpperCamelCase(snake_case__: int) -> str:
    if isinstance(snake_case__, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(snake_case__, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if snake_case__ == 0:
        return "0b0"
    negative = False
    if snake_case__ < 0:
        negative = True
        snake_case__ = -snake_case__
    binary: list[int] = []
    while snake_case__ > 0:
        # collect bits most-significant first
        binary.insert(0, snake_case__ % 2)
        snake_case__ >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
0
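A few doctest-style checks of the conversion above, assuming the fixed function; 37 is 100101 in binary:

assert UpperCamelCase(0) == "0b0"
assert UpperCamelCase(11) == "0b1011"       # 8 + 2 + 1
assert UpperCamelCase(-37) == "-0b100101"   # sign handled before the divide loop
assert UpperCamelCase(37) == bin(37)        # agrees with the builtin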
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case (unittest.TestCase): def __init__( self ,_snake_case ,_snake_case=3 ,_snake_case=32 ,_snake_case=3 ,_snake_case=10 ,_snake_case=[10, 20, 30, 40] ,_snake_case=[1, 1, 2, 1] ,_snake_case=True ,_snake_case=True ,_snake_case="relu" ,_snake_case=3 ,_snake_case=None ,): UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : List[str] = batch_size UpperCAmelCase_ : Union[str, Any] = image_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[Any] = embeddings_size UpperCAmelCase_ : int = hidden_sizes UpperCAmelCase_ : Tuple = depths UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : List[Any] = num_labels UpperCAmelCase_ : Any = scope UpperCAmelCase_ : str = len(_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = self.get_config() return config, pixel_values def UpperCamelCase__ ( self ): return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : int = FlaxRegNetModel(config=_snake_case ) UpperCAmelCase_ : int = model(_snake_case ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : int = FlaxRegNetForImageClassification(config=_snake_case ) UpperCAmelCase_ : Optional[Any] = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = config_and_inputs UpperCAmelCase_ : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Union[str, Any] =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __A : Any =False __A : List[str] =False __A : Union[str, Any] =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : Union[str, Any] = FlaxRegNetModelTester(self ) UpperCAmelCase_ : Tuple = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @unittest.skip(reason="RegNet does not use inputs_embeds" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="RegNet does not support input and output embeddings" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = model_class(_snake_case ) UpperCAmelCase_ : Tuple = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase_ : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCamelCase__ ( self ): def check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : str = model_class(_snake_case ) UpperCAmelCase_ : Any = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) UpperCAmelCase_ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Tuple = self.model_tester.num_stages self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 ) UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[str] = True check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(_snake_case ,_snake_case ) UpperCAmelCase_ : Union[str, Any] = model_class(_snake_case ) @jax.jit def model_jitted(_snake_case ,**_snake_case ): return model(pixel_values=_snake_case ,**_snake_case ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ : Tuple = model_jitted(**_snake_case ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ : Optional[int] = model_jitted(**_snake_case ).to_tuple() self.assertEqual(len(_snake_case ) ,len(_snake_case ) ) for jitted_output, output in zip(_snake_case ,_snake_case ): self.assertEqual(jitted_output.shape ,output.shape ) def a__ ( ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class _snake_case (unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : Union[str, Any] = 
FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" ) UpperCAmelCase_ : Dict = self.default_image_processor UpperCAmelCase_ : List[Any] = prepare_img() UpperCAmelCase_ : Tuple = image_processor(images=_snake_case ,return_tensors="np" ) UpperCAmelCase_ : int = model(**_snake_case ) # verify the logits UpperCAmelCase_ : Tuple = (1, 10_00) self.assertEqual(outputs.logits.shape ,_snake_case ) UpperCAmelCase_ : Any = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1E-4 ) )
71
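The "JIT Enabled"/"JIT Disabled" subtests above assert that the compiled and eager paths agree; the same pattern in isolation, with a toy function standing in for the model:

import jax
import jax.numpy as jnp

@jax.jit
def model_fn(x):
    return jnp.tanh(x @ x.T).sum(axis=-1)

x = jnp.ones((2, 3))
jitted = model_fn(x)              # traced and compiled path
with jax.disable_jit():
    eager = model_fn(x)           # plain Python path, same numerics expected
assert jitted.shape == eager.shape
assert jnp.allclose(jitted, eager)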
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __UpperCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: UpperCamelCase : List[Any] = tensor_name.split('.' ) for split in splits[:-1]: UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) UpperCamelCase : Dict = new_module UpperCamelCase : int = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) UpperCamelCase : Union[str, Any] = tensor_name in module._buffers UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) UpperCamelCase : Optional[Any] = False UpperCamelCase : str = False if is_buffer or not is_bitsandbytes_available(): UpperCamelCase : List[str] = False UpperCamelCase : Tuple = False else: UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCamelCase : List[Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCamelCase : Dict = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[Any] = value.to('cpu' ) if value.dtype == torch.inta: UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: UpperCamelCase : Union[str, Any] = new_value.T UpperCamelCase : Union[str, Any] = old_value.__dict__ if is_abit: UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) UpperCamelCase : Dict = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): UpperCamelCase : List[str] = value.to(snake_case__ ) else: UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: UpperCamelCase : Optional[int] = new_value else: UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) UpperCamelCase : List[str] = new_value def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int: for name, module in model.named_children(): if current_key_name is None: UpperCamelCase : str = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape else: UpperCamelCase : Any = module.in_features UpperCamelCase : List[str] = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCamelCase : Any = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCamelCase : Optional[int] = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCamelCase : str = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCamelCase : int = True # Store the module class in case we need to transpose the weight later UpperCamelCase : Any = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]: UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , 
snake_case__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]: UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCamelCase : List[str] = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] ) UpperCamelCase : Optional[int] = len(snake_case__ ) > 0 # Check if it is a base model UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCamelCase : List[Any] = list(model.named_children() ) UpperCamelCase : Optional[Any] = [list_modules[-1][0]] # add last module together with tied weights UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ ) UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys UpperCamelCase : Tuple = ['.weight', '.bias'] UpperCamelCase : Tuple = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
40
0
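The replacement helper above walks named_children recursively, swapping nn.Linear layers unless their dotted key path matches modules_to_not_convert. A stripped-down sketch of that traversal; QuantLinearStub is a placeholder, not a bitsandbytes layer:

import torch.nn as nn

class QuantLinearStub(nn.Linear):
    """Placeholder standing in for a quantized layer such as bnb.nn.Linear8bitLt."""

def replace_linear(model, skip=("lm_head",), prefix=()):
    for name, module in model.named_children():
        key = ".".join(prefix + (name,))
        if isinstance(module, nn.Linear) and not any(s in key for s in skip):
            # swap the layer in place, reusing its shape and bias setting
            setattr(model, name, QuantLinearStub(module.in_features, module.out_features,
                                                 bias=module.bias is not None))
        else:
            replace_linear(module, skip, prefix + (name,))
    return model

m = nn.Sequential(nn.Linear(4, 4), nn.ModuleDict({"lm_head": nn.Linear(4, 2)}))
replace_linear(m)
print(type(m[0]).__name__, type(m[1]["lm_head"]).__name__)
# QuantLinearStub Linear   <- lm_head left untouched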
'''simple docstring'''


def UpperCamelCase(numbers: list[int]) -> int:
    '''simple docstring'''
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')

    # track both the largest and the smallest product ending here:
    # a negative number can turn the smallest into the largest
    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
72
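Example calls for the function above; the min/max swap on negative numbers is what lets a doubly negative subarray win:

print(UpperCamelCase([2, 3, -2, 4]))   # 6  -> the subarray [2, 3]
print(UpperCamelCase([-2, 0, -1]))     # 0
print(UpperCamelCase([-4, -3, -2]))    # 12 -> [-4, -3]; the swap on negatives rescues it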
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCamelCase ( snake_case__ : int ) -> Dict: UpperCamelCase : Optional[Any] = tmp_path / 'file.csv' UpperCamelCase : Optional[Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]: UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv' UpperCamelCase : Any = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str: UpperCamelCase : Any = tmp_path / 'csv_with_image.csv' UpperCamelCase : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple: UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv' UpperCamelCase : Dict = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) @pytest.fixture def UpperCamelCase ( snake_case__ : Dict ) -> List[str]: UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv' UpperCamelCase : Union[str, Any] = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(snake_case__ , 'w' ) as f: f.write(snake_case__ ) return str(snake_case__ ) def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]: UpperCamelCase : str = Csv() UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(snake_case__ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(snake_case__ ) in record.message for record in caplog.records ) @require_pil def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : List[str] = f.read().splitlines()[1] UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] ) UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() UpperCamelCase : str = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCamelCase ( snake_case__ : Any ) -> str: with open(snake_case__ , encoding='utf-8' ) as f: UpperCamelCase : Any = f.read().splitlines()[1:] UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] ) UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() UpperCamelCase : List[str] = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels] def UpperCamelCase ( snake_case__ : str ) -> 
List[Any]: UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} ) UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] ) UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) UpperCamelCase : str = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
40
0
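The last test relies on a converters mapping to parse the space-separated int_list column; pandas, to which the datasets Csv builder forwards these keyword arguments, applies the same converter directly (file contents inlined here for a self-contained check):

import io
import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n7 8 9\n"
df = pd.read_csv(io.StringIO(csv_text),
                 converters={"int_list": lambda x: [int(i) for i in x.split()]})
print(df["int_list"].tolist())   # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]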
import argparse import collections import json import os import re import string import sys import numpy as np a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE) a_ : List[str] = None def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.') parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.') parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.') parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).') parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.') parser.add_argument( '--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.') parser.add_argument('--verbose' , '-v' , action='store_true') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE = bool(qa['answers']['text']) return qid_to_has_ans def lowerCamelCase__ (_UpperCAmelCase): def remove_articles(_UpperCAmelCase): return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase) def white_space_fix(_UpperCAmelCase): return " ".join(text.split()) def remove_punc(_UpperCAmelCase): SCREAMING_SNAKE_CASE = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(_UpperCAmelCase): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase)))) def lowerCamelCase__ (_UpperCAmelCase): if not s: return [] return normalize_answer(_UpperCAmelCase).split() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return int(normalize_answer(_UpperCAmelCase) == normalize_answer(_UpperCAmelCase)) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase) SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase) SCREAMING_SNAKE_CASE = collections.Counter(_UpperCAmelCase) & collections.Counter(_UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(common.values()) if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE = qa['id'] SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase)] if not gold_answers: # For unanswerable questions, only correct answer is empty string SCREAMING_SNAKE_CASE = [''] if qid not in preds: print(F'''Missing prediction for {qid}''') continue SCREAMING_SNAKE_CASE = preds[qid] # Take max over all gold answers SCREAMING_SNAKE_CASE = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers) SCREAMING_SNAKE_CASE = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase) for a 
in gold_answers) return exact_scores, fa_scores def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = {} for qid, s in scores.items(): SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh if pred_na: SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid]) else: SCREAMING_SNAKE_CASE = s return new_scores def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None): if not qid_list: SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores.values()) / total), ('f1', 1_00.0 * sum(fa_scores.values()) / total), ('total', total), ]) else: SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total), ('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total), ('total', total), ]) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): for k in new_eval: SCREAMING_SNAKE_CASE = new_eval[k] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post') plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b') plt.xlabel('Recall') plt.ylabel('Precision') plt.xlim([0.0, 1.05]) plt.ylim([0.0, 1.05]) plt.title(_UpperCAmelCase) plt.savefig(_UpperCAmelCase) plt.clf() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None): SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k]) SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = 1.0 SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = [1.0] SCREAMING_SNAKE_CASE = [0.0] SCREAMING_SNAKE_CASE = 0.0 for i, qid in enumerate(_UpperCAmelCase): if qid_to_has_ans[qid]: true_pos += scores[qid] SCREAMING_SNAKE_CASE = true_pos / float(i + 1) SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase) if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_UpperCAmelCase) recalls.append(_UpperCAmelCase) if out_image: plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return {"ap": 1_00.0 * avg_prec} def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if out_image_dir and not os.path.exists(_UpperCAmelCase): os.makedirs(_UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , ) SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , ) SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()} SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)' , ) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact') merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1') merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if not qid_list: return SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list] SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase)) plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0)) plt.xlabel('Model probability of no-answer') plt.ylabel('Proportion of dataset') plt.title(F'''Histogram of no-answer probability: {name}''') plt.savefig(os.path.join(_UpperCAmelCase , F'''na_prob_hist_{name}.png''')) plt.clf() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) SCREAMING_SNAKE_CASE = num_no_ans SCREAMING_SNAKE_CASE = cur_score SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k]) for i, qid in enumerate(_UpperCAmelCase): if qid not in scores: continue if qid_to_has_ans[qid]: SCREAMING_SNAKE_CASE = scores[qid] else: if preds[qid]: SCREAMING_SNAKE_CASE = -1 else: SCREAMING_SNAKE_CASE = 0 cur_score += diff if cur_score > best_score: SCREAMING_SNAKE_CASE = cur_score SCREAMING_SNAKE_CASE = na_probs[qid] return 1_00.0 * best_score / len(_UpperCAmelCase), best_thresh def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = best_exact SCREAMING_SNAKE_CASE = exact_thresh SCREAMING_SNAKE_CASE = best_fa SCREAMING_SNAKE_CASE = fa_thresh def lowerCamelCase__ (): with open(OPTS.data_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) SCREAMING_SNAKE_CASE = dataset_json['data'] with open(OPTS.pred_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) if OPTS.na_prob_file: with open(OPTS.na_prob_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) else: SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds} SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase) # maps qid to True/False SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v] SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh) SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh) SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase) if has_ans_qids: SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns') if no_ans_qids: SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns') if OPTS.na_prob_file: find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if 
OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir) histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns') histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns') if OPTS.out_file: with open(OPTS.out_file , 'w') as f: json.dump(_UpperCAmelCase , _UpperCAmelCase) else: print(json.dumps(_UpperCAmelCase , indent=2)) if __name__ == "__main__": a_ : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
73
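The F1 computation above is bag-of-tokens overlap between gold and predicted answers; the same formula in isolation, with the normalization simplified to lowercasing (the full script also strips articles and punctuation):

import collections

def token_f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.lower().split(), pred.lower().split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:          # no-answer case: match iff both empty
        return float(gold_toks == pred_toks)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat", "cat sat down"))  # 0.666...: precision = recall = 2/3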
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # random odd starting weight in [1, 199]
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
40
0
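One hand-computed iteration of the training loop above, with the weight fixed at 10 so the arithmetic is reproducible:

import math

INITIAL_VALUE = 0.02
weight, expected = 10.0, 80

layer_1 = 1 / (1 + math.exp(-INITIAL_VALUE * weight))    # sigmoid(0.2) ~ 0.5498
layer_1_error = expected / 100 - layer_1                 # 0.8 - 0.5498 ~ 0.2502
layer_1_delta = layer_1_error * layer_1 * (1 - layer_1)  # ~ 0.0619
weight += INITIAL_VALUE * layer_1_delta                  # nudged toward the 0.8 target
print(round(layer_1, 4), round(weight, 6))               # 0.5498 10.001238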
def fibonacci(n):
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1_000):
    """simple docstring"""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
74
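Quick sanity checks for the helpers above; note that fibonacci here is zero-indexed from F(0) = 0:

assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12   # 144 is the first 3-digit Fibonacci number
# solution(1000) == 4782 (Project Euler 25); slow with this quadratic rescan of the sequence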
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
40
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class lowerCamelCase_ : def __init__( self : Dict , _A : str=2 , _A : Tuple=3 , _A : Tuple=64 , _A : str=None ): '''simple docstring''' UpperCAmelCase__ : int = np.random.default_rng(_A ) UpperCAmelCase__ : Any = length UpperCAmelCase__ : int = rng.normal(size=(length,) ).astype(np.floataa ) UpperCAmelCase__ : Optional[Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Tuple ): '''simple docstring''' return self.length def __getitem__( self : Union[str, Any] , _A : Optional[Any] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class lowerCamelCase_ ( torch.nn.Module ): def __init__( self : str , _A : List[str]=0 , _A : List[Any]=0 , _A : int=False ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Dict = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) UpperCAmelCase__ : Dict = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) UpperCAmelCase__ : List[Any] = True def lowercase_ ( self : Any , _A : List[Any]=None ): '''simple docstring''' if self.first_batch: print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) UpperCAmelCase__ : Any = False return x * self.a[0] + self.b[0] class lowerCamelCase_ ( torch.nn.Module ): def __init__( self : Optional[int] , _A : List[str]=0 , _A : Optional[int]=0 , _A : Any=False ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Dict = torch.nn.Parameter(torch.tensor(_A ).float() ) UpperCAmelCase__ : Optional[Any] = torch.nn.Parameter(torch.tensor(_A ).float() ) UpperCAmelCase__ : str = True def lowercase_ ( self : str , _A : List[Any]=None ): '''simple docstring''' if self.first_batch: print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) UpperCAmelCase__ : int = False return x * self.a + self.b def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict: from datasets import load_dataset from transformers import AutoTokenizer UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase__ : Dict = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} UpperCAmelCase__ : str = load_dataset('''csv''' , data_files=lowerCAmelCase__ ) UpperCAmelCase__ : Dict = datasets['''train'''].unique('''label''' ) UpperCAmelCase__ : Union[str, Any] = {v: i for i, v in enumerate(lowerCAmelCase__ )} def tokenize_function(lowerCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Any = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) if "label" in examples: UpperCAmelCase__ : Union[str, Any] = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase__ : Optional[int] = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(lowerCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' ) return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase__ : Optional[Any] = DataLoader(tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 ) UpperCAmelCase__ : Tuple = DataLoader(tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
75
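The collate_fn above pads to a fixed length on TPU (static shapes for XLA) and to the longest sample otherwise; a self-contained sketch of those two branches, with a toy padder standing in for tokenizer.pad:

import torch

def pad_batch(seqs, fixed_len=None):
    # pad to fixed_len (TPU-style static shapes) or to the longest sequence
    target = fixed_len or max(len(s) for s in seqs)
    out = torch.zeros(len(seqs), target, dtype=torch.long)
    for i, s in enumerate(seqs):
        out[i, : len(s)] = torch.tensor(s)
    return out

batch = [[5, 6, 7], [8, 9]]
print(pad_batch(batch).shape)                  # torch.Size([2, 3])   dynamic padding
print(pad_batch(batch, fixed_len=128).shape)   # torch.Size([2, 128]) static shapes for XLA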
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = '''▁''' __UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } __UpperCAmelCase = { '''facebook/xglm-564M''': 2_048, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None: UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer UpperCamelCase : Any = 7 UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase : int = 1 # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} UpperCamelCase : Optional[int] = len(self.sp_model ) UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: UpperCamelCase : int = self.__dict__.copy() UpperCamelCase : Union[str, Any] = None UpperCamelCase : int = self.sp_model.serialized_model_proto() return state def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str: UpperCamelCase : Any = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): UpperCamelCase : Any = {} UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a UpperCamelCase : Optional[int] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : str = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def snake_case_ ( self ) -> int: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def snake_case_ ( self ) -> int: UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip() return out_string def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) 
return UpperCamelCase : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi: UpperCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
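# A minimal, self-contained sketch of the fairseq<->SentencePiece id alignment
# implemented by the tokenizer above: the four control tokens are pinned to
# fairseq positions 0-3, and every real spm id is shifted by an offset of 1, so
# spm id 3 (the first "real" token ',') lands on fairseq id 4. The `toy_spm_ids`
# table and helper name below are illustrative assumptions, not part of the
# actual tokenizer API.
FAIRSEQ_TOKENS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1

toy_spm_ids = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}

def toy_token_to_id(token):
    # Control tokens bypass the spm table entirely.
    if token in FAIRSEQ_TOKENS:
        return FAIRSEQ_TOKENS[token]
    # spm returns 0 for unknown pieces, which must fall back to the <unk> id.
    spm_id = toy_spm_ids.get(token, 0)
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS["<unk>"]

assert toy_token_to_id("<pad>") == 1
assert toy_token_to_id(",") == 4          # spm id 3 + offset 1
assert toy_token_to_id("oov-piece") == 3  # unknown piece falls back to <unk>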
40
0
"""simple docstring""" a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __UpperCAmelCase ( ): __lowercase : Any = input('''Enter message: ''' ) __lowercase : Union[str, Any] = input('''Enter key [alphanumeric]: ''' ) __lowercase : List[Any] = input('''Encrypt/Decrypt [e/d]: ''' ) if mode.lower().startswith('''e''' ): __lowercase : Union[str, Any] = '''encrypt''' __lowercase : Any = encrypt_message(__UpperCamelCase , __UpperCamelCase ) elif mode.lower().startswith('''d''' ): __lowercase : int = '''decrypt''' __lowercase : Dict = decrypt_message(__UpperCamelCase , __UpperCamelCase ) print(f"""\n{mode.title()}ed message:""" ) print(__UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return translate_message(__UpperCamelCase , __UpperCamelCase , '''encrypt''' ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return translate_message(__UpperCamelCase , __UpperCamelCase , '''decrypt''' ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Optional[Any] = [] __lowercase : Union[str, Any] = 0 __lowercase : Optional[int] = key.upper() for symbol in message: __lowercase : Optional[Any] = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__UpperCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__UpperCamelCase ): __lowercase : List[Any] = 0 else: translated.append(__UpperCamelCase ) return "".join(__UpperCamelCase ) if __name__ == "__main__": main()
76
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCAmelCase = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } __UpperCAmelCase = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : str = ["input_ids", "attention_mask"] UpperCAmelCase__ : Dict = RobertaTokenizer def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]: super().__init__( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, 
errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) ) UpperCamelCase : List[str] = add_prefix_space UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = add_prefix_space UpperCamelCase : Optional[Any] = 'post_processor' UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase : Optional[Any] = tuple(state['sep'] ) if "cls" in state: UpperCamelCase : Optional[int] = tuple(state['cls'] ) UpperCamelCase : Any = False if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase : Optional[int] = add_prefix_space UpperCamelCase : List[Any] = True if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets: UpperCamelCase : Dict = trim_offsets UpperCamelCase : Union[str, Any] = True if changes_to_apply: UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) ) UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) @property def snake_case_ ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value UpperCamelCase : List[Any] = value def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding: UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs."
) return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple: UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase : Dict = [self.sep_token_id] UpperCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
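# A minimal sketch of the special-token layout built by the two methods above,
# assuming the standard RoBERTa ids bos=0 and sep/eos=2 (illustrative constants
# here, not values read from a real tokenizer). A pair of sequences is joined
# as <s> A </s></s> B </s>, and the token type ids are all zero because
# RoBERTa does not use segment embeddings.
BOS, EOS = 0, 2

def with_special_tokens(ids_a, ids_b=None):
    out = [BOS] + ids_a + [EOS]
    if ids_b is None:
        return out
    return out + [EOS] + ids_b + [EOS]

def token_type_ids(ids_a, ids_b=None):
    return [0] * len(with_special_tokens(ids_a, ids_b))

assert with_special_tokens([7, 8]) == [0, 7, 8, 2]
assert with_special_tokens([7], [9]) == [0, 7, 2, 2, 9, 2]
assert token_type_ids([7], [9]) == [0, 0, 0, 0, 0, 0]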
40
0
"""simple docstring""" import torch from diffusers import DiffusionPipeline class a__ ( __magic_name__ ): def __init__( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any): """simple docstring""" super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_) def __call__( self : Optional[int]): """simple docstring""" __UpperCAmelCase : List[str] = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) __UpperCAmelCase : int = 1 __UpperCAmelCase : str = self.unet(UpperCamelCase_ , UpperCamelCase_).sample __UpperCAmelCase : List[Any] = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_).prev_sample __UpperCAmelCase : str = scheduler_output - scheduler_output + torch.ones_like(UpperCamelCase_) return result
77
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple: super().__init__(features=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = torch_tensor_kwargs import torch # noqa import torch at initialization def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict: import torch if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column: if all( isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any: import torch if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ): return value elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ): return value.tolist() UpperCamelCase : str = {} if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ): UpperCamelCase : List[str] = {'dtype': torch.intaa} elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ): UpperCamelCase : int = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ): UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ ) return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: import torch # support for torch, tf, jax etc. 
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ): UpperCamelCase : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] ) elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ): return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] ) return self._tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int: return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ ) return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor": UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] ) UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ ) return column def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping: UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for column_name in batch: UpperCamelCase : str = self._consolidate(batch[column_name] ) return batch
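# A minimal sketch of the dtype-promotion rule in the formatter above: integer
# numpy inputs default to torch.int64 and floating inputs to torch.float32
# (presumably the values behind the mangled `torch.intaa` / `torch.floataa`).
import numpy as np
import torch

def to_default_tensor(value):
    array = np.asarray(value)
    default = {}
    if np.issubdtype(array.dtype, np.integer):
        default = {"dtype": torch.int64}
    elif np.issubdtype(array.dtype, np.floating):
        default = {"dtype": torch.float32}
    return torch.tensor(array, **default)

assert to_default_tensor(np.array([1, 2], dtype=np.int8)).dtype == torch.int64
assert to_default_tensor(np.array([1.5], dtype=np.float64)).dtype == torch.float32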
40
0
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int ) -> int: '''simple docstring''' assert isinstance(snake_case_ , snake_case_ ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: UpperCAmelCase_ = f"""The input value of [n={number}] has to be > 0""" raise ValueError(snake_case_ ) else: UpperCAmelCase_ = sylvester(number - 1 ) UpperCAmelCase_ = num - 1 UpperCAmelCase_ = num return lower * upper + 1 if __name__ == "__main__": print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
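# A quick worked check of the recurrence above: the branches reduce to
# s(n) = s(n-1)**2 - s(n-1) + 1 with s(1) = 2, giving 2, 3, 7, 43, 1807, ...
def sylvester_iterative(n):
    value = 2
    for _ in range(n - 1):
        value = value * value - value + 1
    return value

assert [sylvester_iterative(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]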
78
from __future__ import annotations import math import numpy as np from numpy.linalg import norm def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float: return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) ) def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]: if dataset.ndim != value_array.ndim: UpperCamelCase : int = ( 'Wrong input data\'s dimensions... ' F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}""" ) raise ValueError(snake_case__ ) try: if dataset.shape[1] != value_array.shape[1]: UpperCamelCase : str = ( 'Wrong input data\'s shape... ' F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}""" ) raise ValueError(snake_case__ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('Wrong shape' ) if dataset.dtype != value_array.dtype: UpperCamelCase : Dict = ( 'Input data have different datatype... ' F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}""" ) raise TypeError(snake_case__ ) UpperCamelCase : List[Any] = [] for value in value_array: UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] ) UpperCamelCase : Dict = dataset[0].tolist() for dataset_value in dataset[1:]: UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ ) if dist > temp_dist: UpperCamelCase : str = temp_dist UpperCamelCase : List[str] = dataset_value.tolist() answer.append([vector, dist] ) return answer def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float: return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ )) if __name__ == "__main__": import doctest doctest.testmod()
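# A minimal usage sketch of the nearest-neighbour search above, written as a
# standalone demo (the snippet's own mangled names differ): for each query,
# keep the dataset row with the smallest euclidean distance.
import numpy as np

demo_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
demo_query = np.array([0.9, 1.2])

distances = np.linalg.norm(demo_dataset - demo_query, axis=1)
nearest = demo_dataset[int(np.argmin(distances))]
assert nearest.tolist() == [1.0, 1.0]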
40
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class UpperCAmelCase_ ( __lowerCamelCase ): __lowerCamelCase = 'blip_2_vision_model' def __init__( self , _lowerCAmelCase=1408 , _lowerCAmelCase=6144 , _lowerCAmelCase=39 , _lowerCAmelCase=16 , _lowerCAmelCase=224 , _lowerCAmelCase=14 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0_0_0_0_1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=1e-10 , _lowerCAmelCase=True , **_lowerCAmelCase , ): super().__init__(**_lowerCAmelCase ) UpperCAmelCase__ : Tuple = hidden_size UpperCAmelCase__ : int = intermediate_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : Union[str, Any] = patch_size UpperCAmelCase__ : Dict = image_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : Tuple = attention_dropout UpperCAmelCase__ : Dict = layer_norm_eps UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : Tuple = qkv_bias @classmethod def __UpperCAmelCase ( cls , _lowerCAmelCase , **_lowerCAmelCase ): cls._set_token_in_kwargs(_lowerCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ : str = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": UpperCAmelCase__ : List[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class UpperCAmelCase_ ( __lowerCamelCase ): __lowerCamelCase = 'blip_2_qformer' def __init__( self , _lowerCAmelCase=30522 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=1e-12 , _lowerCAmelCase=0 , _lowerCAmelCase="absolute" , _lowerCAmelCase=2 , _lowerCAmelCase=1408 , **_lowerCAmelCase , ): super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : Optional[int] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : int = intermediate_size UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : int = max_position_embeddings UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Optional[int] = layer_norm_eps UpperCAmelCase__ : Dict = position_embedding_type UpperCAmelCase__ : Tuple = cross_attention_frequency UpperCAmelCase__ : Optional[Any] = encoder_hidden_size @classmethod def __UpperCAmelCase ( cls , _lowerCAmelCase , **_lowerCAmelCase ): cls._set_token_in_kwargs(_lowerCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": UpperCAmelCase__ : Any = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class UpperCAmelCase_ ( __lowerCamelCase ): __lowerCamelCase = 'blip-2' __lowerCamelCase = True def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=32 , **_lowerCAmelCase ): super().__init__(**_lowerCAmelCase ) if vision_config is None: UpperCAmelCase__ : int = {} logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" ) if qformer_config is None: UpperCAmelCase__ : str = {} logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" ) if text_config is None: UpperCAmelCase__ : Dict = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) UpperCAmelCase__ : Any = BlipaVisionConfig(**_lowerCAmelCase ) UpperCAmelCase__ : str = BlipaQFormerConfig(**_lowerCAmelCase ) UpperCAmelCase__ : Optional[int] = text_config["""model_type"""] if """model_type""" in text_config else """opt""" UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[text_model_type](**_lowerCAmelCase ) UpperCAmelCase__ : Tuple = self.text_config.tie_word_embeddings UpperCAmelCase__ : str = self.text_config.is_encoder_decoder UpperCAmelCase__ : Optional[Any] = num_query_tokens UpperCAmelCase__ : Optional[Any] = self.vision_config.hidden_size UpperCAmelCase__ : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES UpperCAmelCase__ : List[Any] = 1.0 UpperCAmelCase__ : Optional[Any] = 0.0_2 @classmethod def __UpperCAmelCase ( cls , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowerCAmelCase , ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : str = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : str = self.vision_config.to_dict() UpperCAmelCase__ : Dict = self.qformer_config.to_dict() UpperCAmelCase__ : List[Any] = self.text_config.to_dict() UpperCAmelCase__ : List[Any] = self.__class__.model_type return output
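# A minimal sketch of the nested-config pattern above: each sub-config owns its
# own attributes, and the composite's to_dict() re-embeds the sub-configs as
# plain dicts under their keys. The class names here are simplified stand-ins,
# not the real transformers classes.
class SubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return dict(self.__dict__)

class CompositeConfig:
    def __init__(self, vision_config=None, qformer_config=None):
        self.vision_config = SubConfig(**(vision_config or {}))
        self.qformer_config = SubConfig(**(qformer_config or {}))

    def to_dict(self):
        output = dict(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        return output

cfg = CompositeConfig(vision_config={"hidden_size": 1408})
assert cfg.to_dict()["vision_config"]["hidden_size"] == 1408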
79
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __UpperCAmelCase = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='''relu''')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='''relu''')) classifier.add(layers.Dense(units=1, activation='''sigmoid''')) # Compiling the CNN classifier.compile( optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy'''] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __UpperCAmelCase = train_datagen.flow_from_directory( '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) __UpperCAmelCase = test_datagen.flow_from_directory( '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('''cnn.h5''') # Part 3 - Making new predictions __UpperCAmelCase = tf.keras.preprocessing.image.load_img( '''dataset/single_prediction/image.png''', target_size=(64, 64) ) __UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image) __UpperCAmelCase = np.expand_dims(test_image, axis=0) __UpperCAmelCase = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __UpperCAmelCase = '''Normal''' if result[0][0] == 1: __UpperCAmelCase = '''Abnormality detected'''
40
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : Any = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ): __snake_case :List[Any] = 'convnextv2' def __init__( self : List[Any] , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Any=4 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : Any=1e-12 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : str=224 , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Any=None , **_lowerCAmelCase : Union[str, Any] , ) -> int: """simple docstring""" super().__init__(**_lowerCAmelCase ) __lowercase = num_channels __lowercase = patch_size __lowercase = num_stages __lowercase = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes __lowercase = [3, 3, 9, 3] if depths is None else depths __lowercase = hidden_act __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = drop_path_rate __lowercase = image_size __lowercase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names )
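# A simplified stand-in (not the transformers implementation) for the aligned
# out_features/out_indices helper used above: when neither argument is given,
# a backbone defaults to exposing only its last stage.
def align_output_features(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]
        out_indices = [len(stage_names) - 1]
    return out_features, out_indices

demo_stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
assert align_output_features(None, None, demo_stages) == (["stage4"], [4])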
80
import os import pytest from attr import dataclass __UpperCAmelCase = '''us-east-1''' # default region @dataclass class lowerCAmelCase_ : UpperCAmelCase__ : str UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role" UpperCAmelCase__ : Union[str, Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000} @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"}, ] @property def snake_case_ ( self ) -> str: return F"""{self.framework}-transformers-test""" @property def snake_case_ ( self ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def snake_case_ ( self ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]: UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
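# A quick check of the metric-extraction regex defined above against a
# fabricated log line (the log text is illustrative, not real SageMaker
# output): the pattern captures the numeric value after the equals sign.
import re

pattern = r"train_runtime.*=\D*(.*?)$"
line = "train_runtime = 12.34"
match = re.search(pattern, line)
assert match is not None and match.group(1) == "12.34"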
40
0