Dataset schema (each row pairs a code sample with a style-context sample):

    code                     string   82 – 54.1k chars
    code_codestyle           int64    0 – 699
    style_context            string   111 – 35.6k chars
    style_context_codestyle  int64    0 – 699
    label                    int64    0 – 1
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Polybius-square cipher with a transposition step (the Bifid cipher)."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # the 5x5 square merges i and j

        # Write each letter's coordinates as a column of a 2 x n grid...
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # ...then read the grid row by row and regroup the digits into pairs.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[numbers_index * 2 + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Recover the flattened digit sequence produced by encode()...
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # ...and read it back column by column.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
code_codestyle: 28
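A quick round-trip sanity check for the cipher above (a minimal sketch; the plaintext is an arbitrary example):

if __name__ == "__main__":
    cipher = BifidCipher()
    ciphertext = cipher.encode("test message")
    # Spaces are stripped and j folds into i during encoding, so the
    # round trip recovers the normalized plaintext.
    assert cipher.decode(ciphertext) == "testmessage"
    print(ciphertext)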
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
style_context_codestyle: 28
label: 1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 28
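As a side note, a minimal sketch of what the lazy-module pattern above buys (assuming `transformers` and `sentencepiece` are installed): importing the package only registers the import structure, and the heavy tokenizer module is loaded on first attribute access.

import importlib

# Cheap: only the _LazyModule shim is created, nothing heavy is imported yet.
bartpho = importlib.import_module("transformers.models.bartpho")

# This attribute access triggers the real import of tokenization_bartpho.
tokenizer_cls = bartpho.BartphoTokenizer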
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_2_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
style_context_codestyle: 28
label: 1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 28
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        # For each sample, keep the decoding head with the highest confidence.
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence is the product of per-step max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
style_context_codestyle: 28
label: 1
"""Project Euler Problem 173: https://projecteuler.net/problem=173

Using up to one million tiles, how many different "hollow" square laminae can be formed?
"""

from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Return the number of hollow square laminae that use at most `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 28
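A brute-force cross-check of the counting formula above (a sketch; `solution_brute_force` is our name, not from the source). A lamina with outer width n and a hole of width m, where m shares n's parity and 1 <= m <= n - 2, uses n**2 - m**2 tiles:

def solution_brute_force(limit: int = 1_000_000) -> int:
    count = 0
    for outer_width in range(3, limit // 4 + 2):
        # Larger holes mean thinner rings and fewer tiles, so walk the hole
        # width downwards and stop once the tile budget is exceeded.
        for hole_width in range(outer_width - 2, 0, -2):
            if outer_width**2 - hole_width**2 > limit:
                break
            count += 1
    return count


assert solution_brute_force(1_000) == solution(1_000)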
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
style_context_codestyle: 28
label: 1
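A hypothetical way to drive the converter above from Python rather than the CLI (the paths are placeholders, not from the source; `stats.npy` is expected to hold the mean and scale rows loaded above):

convert_hifigan_checkpoint(
    checkpoint_path="./generator.ckpt",  # placeholder path to the original generator checkpoint
    stats_path="./stats.npy",            # placeholder path to the spectrogram statistics
    pytorch_dump_folder_path="./speecht5_hifigan",
)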
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
code_codestyle: 28
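A small usage sketch for the feature extractor above on a synthetic one-second waveform (shapes follow from the defaults: 128 mel bins, 16x16 patches, 44.1 kHz sampling rate):

import numpy as np

feature_extractor = TvltFeatureExtractor()
waveform = np.random.randn(44_100).astype(np.float32)
batch = feature_extractor(waveform, sampling_rate=44_100, return_tensors="np")
print(batch["audio_values"].shape)  # (1, 1, padded_time, 128)
print(batch["audio_mask"].shape)    # (1, num_patches)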
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
style_context_codestyle: 28
label: 1
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    task_id: []
    for task_id in [
        "image-classification", "translation", "image-segmentation", "fill-mask",
        "automatic-speech-recognition", "token-classification", "sentence-similarity",
        "audio-classification", "question-answering", "summarization",
        "zero-shot-classification", "table-to-text", "feature-extraction", "other",
        "multiple-choice", "text-classification", "text-to-image",
        "text2text-generation", "zero-shot-image-classification",
        "tabular-classification", "tabular-regression", "image-to-image",
        "tabular-to-text", "unconditional-image-generation", "text-retrieval",
        "text-to-speech", "object-detection", "audio-to-audio", "text-generation",
        "conversational", "table-question-answering", "visual-question-answering",
        "image-to-text", "reinforcement-learning", "voice-activity-detection",
        "time-series-forecasting", "document-question-answering",
    ]
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
code_codestyle: 28
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected output height and width after resizing to a scalar shortest edge."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
28
1
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Optional[Any] = VideoToVideoSDPipeline A : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''} A : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''} A : Optional[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} A : Optional[Any] = False # No `output_type`. A : Tuple = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4, ) SCREAMING_SNAKE_CASE : Tuple = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : str = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : List[Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = 
VideoToVideoSDPipeline(**A ) SCREAMING_SNAKE_CASE : int = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = 'np' SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**A ).frames SCREAMING_SNAKE_CASE : Tuple = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) SCREAMING_SNAKE_CASE : Dict = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def UpperCamelCase_ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A, expected_max_diff=5E-3 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='cpu' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = torch.randn((1, 10, 3, 1_024, 576), generator=A ) SCREAMING_SNAKE_CASE : Dict = video.to('cuda' ) SCREAMING_SNAKE_CASE : Tuple = 'Spiderman is surfing' SCREAMING_SNAKE_CASE : int = pipe(A, video=A, generator=A, num_inference_steps=3, output_type='pt' ).frames SCREAMING_SNAKE_CASE : Dict = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
28
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset) def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,): """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(__UpperCamelCase ): if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ): if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " 'is an empty dataset dictionary.' ) raise ValueError( f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." ) if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = ( (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase ) else: return _interleave_iterable_datasets( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase ) def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,): """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(__UpperCamelCase ): if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ): if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " 'is an empty dataset dictionary.' 
) raise ValueError( f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." ) if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = ( (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase ) else: return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
28
1
'''simple docstring'''

def lowercase__( __UpperCamelCase: int ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = [1]
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = 0, 0, 0
    SCREAMING_SNAKE_CASE : Dict = ugly_nums[ia] * 2
    SCREAMING_SNAKE_CASE : Optional[int] = ugly_nums[ia] * 3
    SCREAMING_SNAKE_CASE : Any = ugly_nums[ia] * 5
    for _ in range(1 ,__UpperCamelCase ):
        SCREAMING_SNAKE_CASE : int = min(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
        ugly_nums.append(__UpperCamelCase )
        if next_num == next_a:
            ia += 1
            SCREAMING_SNAKE_CASE : Tuple = ugly_nums[ia] * 2
        if next_num == next_a:
            ia += 1
            SCREAMING_SNAKE_CASE : Dict = ugly_nums[ia] * 3
        if next_num == next_a:
            ia += 1
            SCREAMING_SNAKE_CASE : Union[str, Any] = ugly_nums[ia] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(F"""{ugly_numbers(2_0_0) = }""")
28
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
28
1
'''simple docstring'''

import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


UpperCamelCase_ = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str ,__UpperCamelCase: int ):
    """simple docstring"""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}." )
    if not ops[op](version.parse(__UpperCamelCase ) ,version.parse(__UpperCamelCase ) ):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )


def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Optional[str] = None ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = f"\n{hint}" if hint is not None else ''

    # non-versioned check
    if re.match(r'^[\w_\-\d]+$' ,__UpperCamelCase ):
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = requirement, None, None
    else:
        SCREAMING_SNAKE_CASE : List[Any] = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' ,__UpperCamelCase )
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
                f" got {requirement}" )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = match[0]
        SCREAMING_SNAKE_CASE : Optional[int] = want_full.split(',' )  # there could be multiple requirements
        SCREAMING_SNAKE_CASE : List[str] = {}
        for w in want_range:
            SCREAMING_SNAKE_CASE : List[Any] = re.findall(r'^([\s!=<>]{1,2})(.+)' ,__UpperCamelCase )
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
                    f" but got {requirement}" )
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = match[0]
            SCREAMING_SNAKE_CASE : Optional[int] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}" )

    # special case
    if pkg == "python":
        SCREAMING_SNAKE_CASE : Dict = '.'.join([str(__UpperCamelCase ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
        return

    # check if any version is installed
    try:
        SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.metadata.version(__UpperCamelCase )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}" )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[Any] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(__UpperCamelCase ,__UpperCamelCase )
28
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
28
1
'''simple docstring'''

from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def lowercase__( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[Any] = ArgumentParser('Transformers CLI tool' ,usage='transformers-cli <command> [<args>]' )
    SCREAMING_SNAKE_CASE : Optional[Any] = parser.add_subparsers(help='transformers-cli command helpers' )

    # Register commands
    ConvertCommand.register_subcommand(__UpperCamelCase )
    DownloadCommand.register_subcommand(__UpperCamelCase )
    EnvironmentCommand.register_subcommand(__UpperCamelCase )
    RunCommand.register_subcommand(__UpperCamelCase )
    ServeCommand.register_subcommand(__UpperCamelCase )
    UserCommands.register_subcommand(__UpperCamelCase )
    AddNewModelCommand.register_subcommand(__UpperCamelCase )
    AddNewModelLikeCommand.register_subcommand(__UpperCamelCase )
    LfsCommands.register_subcommand(__UpperCamelCase )
    PTtoTFCommand.register_subcommand(__UpperCamelCase )

    # Let's go
    SCREAMING_SNAKE_CASE : int = parser.parse_args()
    if not hasattr(__UpperCamelCase ,'func' ):
        parser.print_help()
        exit(1 )

    # Run
    SCREAMING_SNAKE_CASE : List[str] = args.func(__UpperCamelCase )
    service.run()


if __name__ == "__main__":
    main()
28
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
28
1
'''simple docstring'''

import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class _a ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(A ):
                SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(A )
                self.assertIsNotNone(A )
                self.assertIsInstance(A, A )
                SCREAMING_SNAKE_CASE : Tuple = FlaxAutoModel.from_pretrained(A )
                self.assertIsNotNone(A )
                self.assertIsInstance(A, A )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(A ):
                SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(A )
                self.assertIsNotNone(A )
                self.assertIsInstance(A, A )
                SCREAMING_SNAKE_CASE : str = FlaxAutoModel.from_pretrained(A )
                self.assertIsNotNone(A )
                self.assertIsInstance(A, A )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(A )
            SCREAMING_SNAKE_CASE : List[str] = FlaxBertModel.from_pretrained(A )
            SCREAMING_SNAKE_CASE : List[str] = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**A ):
                return model(**A )

            eval(**A ).block_until_ready()

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(A )
            SCREAMING_SNAKE_CASE : int = FlaxRobertaModel.from_pretrained(A )
            SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**A ):
                return model(**A )

            eval(**A ).block_until_ready()

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            A, 'bert-base is not a local folder and is not a valid model identifier' ):
            SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained('bert-base' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            A, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained(A, revision='aaaaaa' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            A, 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack', ):
            SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(A, 'Use `from_pt=True` to load this model' ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
28
'''simple docstring'''

class _a :
    '''simple docstring'''

    def __init__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = val
        SCREAMING_SNAKE_CASE : Any = None
        SCREAMING_SNAKE_CASE : Union[str, Any] = None

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    SCREAMING_SNAKE_CASE : Optional[int] = Node(A )
                else:
                    self.left.insert(A )
            elif val > self.val:
                if self.right is None:
                    SCREAMING_SNAKE_CASE : int = Node(A )
                else:
                    self.right.insert(A )
        else:
            SCREAMING_SNAKE_CASE : int = val


def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ):
    """simple docstring"""
    if root:
        inorder(root.left ,__UpperCamelCase )
        res.append(root.val )
        inorder(root.right ,__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[Any] ):
    """simple docstring"""
    if len(__UpperCamelCase ) == 0:
        return arr
    SCREAMING_SNAKE_CASE : Optional[int] = Node(arr[0] )
    for i in range(1 ,len(__UpperCamelCase ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    SCREAMING_SNAKE_CASE : Dict = []
    inorder(__UpperCamelCase ,__UpperCamelCase )
    return res


if __name__ == "__main__":
    print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
28
1
'''simple docstring'''

import logging
import os

from .state import PartialState


class _a ( logging.LoggerAdapter ):
    '''simple docstring'''

    @staticmethod
    def UpperCamelCase_ ( A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def UpperCamelCase_ ( self, A, A, *A, **A ):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        SCREAMING_SNAKE_CASE : int = kwargs.pop('main_process_only', A )
        SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('in_order', A )
        if self.isEnabledFor(A ):
            if self._should_log(A ):
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.process(A, A )
                self.logger.log(A, A, *A, **A )
            elif in_order:
                SCREAMING_SNAKE_CASE : List[str] = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.process(A, A )
                        self.logger.log(A, A, *A, **A )
                    state.wait_for_everyone()


def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str = None ):
    """simple docstring"""
    if log_level is None:
        SCREAMING_SNAKE_CASE : Union[str, Any] = os.environ.get('ACCELERATE_LOG_LEVEL' ,__UpperCamelCase )
    SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__UpperCamelCase )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(__UpperCamelCase ,{} )
28
'''simple docstring'''

import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ):
    """simple docstring"""
    from .. import __version__

    SCREAMING_SNAKE_CASE : int = take_from
    SCREAMING_SNAKE_CASE : Optional[int] = ()
    if not isinstance(args[0] ,__UpperCamelCase ):
        SCREAMING_SNAKE_CASE : List[str] = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}" )
        SCREAMING_SNAKE_CASE : Tuple = None
        if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(__UpperCamelCase ),)
            SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(__UpperCamelCase ,__UpperCamelCase ):
            values += (getattr(__UpperCamelCase ,__UpperCamelCase ),)
            SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase )
    if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0:
        SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
        SCREAMING_SNAKE_CASE : Any = call_frame.filename
        SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno
        SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
    if len(__UpperCamelCase ) == 0:
        return
    elif len(__UpperCamelCase ) == 1:
        return values[0]
    return values
28
1
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class _a ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    A : List[str] = '''roformer'''

    def __init__( self, A=50_000, A=None, A=768, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=1_536, A=2, A=0.02, A=1E-12, A=0, A=False, A=True, **A, ):
        '''simple docstring'''
        super().__init__(pad_token_id=A, **A )
        SCREAMING_SNAKE_CASE : List[Any] = vocab_size
        SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
        SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
        SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
        SCREAMING_SNAKE_CASE : Any = hidden_act
        SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
        SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
        SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
        SCREAMING_SNAKE_CASE : List[str] = initializer_range
        SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
        SCREAMING_SNAKE_CASE : Tuple = rotary_value
        SCREAMING_SNAKE_CASE : str = use_cache


class _a ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    @property
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            SCREAMING_SNAKE_CASE : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = {0: 'batch', 1: 'sequence'}
        SCREAMING_SNAKE_CASE : int = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
28
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
1
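The row above wires up transformers' lazy-import pattern: an `_import_structure` dict is grown behind availability checks and handed to `_LazyModule`. A minimal sketch of the gating idea using only the standard library (helper names are mine):

```python
import importlib.util

def is_torch_available() -> bool:
    # Probe for the package without actually importing it.
    return importlib.util.find_spec("torch") is not None

_import_structure = {"configuration_roformer": ["RoFormerConfig"]}

if is_torch_available():
    # Framework-specific symbols are only registered when the backend exists.
    _import_structure["modeling_roformer"] = ["RoFormerModel"]

print(_import_structure)
```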
'''simple docstring''' import os import sys import unittest UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCamelCase_ = os.path.join(git_repo_path, "src", "diffusers") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = find_backend(' if not is_torch_available():' ) self.assertEqual(A, 'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") SCREAMING_SNAKE_CASE : Dict = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(A, 'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") SCREAMING_SNAKE_CASE : Dict = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(A, 'torch_and_transformers_and_onnx' ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch', A ) self.assertIn('torch_and_transformers', A ) self.assertIn('flax_and_transformers', A ) self.assertIn('torch_and_transformers_and_onnx', A ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel', objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel', objects['flax'] ) self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = create_dummy_object('CONSTANT', '\'torch\'' ) self.assertEqual(A, '\nCONSTANT = None\n' ) SCREAMING_SNAKE_CASE : Any = create_dummy_object('function', '\'torch\'' ) self.assertEqual( A, '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) SCREAMING_SNAKE_CASE : str = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' SCREAMING_SNAKE_CASE : Any = create_dummy_object('FakeClass', '\'torch\'' ) self.assertEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n 
@classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' SCREAMING_SNAKE_CASE : Tuple = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'], A )
28
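The tests above exercise `find_backend`, which maps an availability-check line to a backend name like `torch_and_transformers`. A rough reimplementation under those assumptions; the real helper in `utils/check_dummies.py` may differ in detail:

```python
import re

def find_backend(line: str):
    single = re.match(r"^\s*if not is_(\w+)_available\(\):\s*$", line)
    if single:
        return single.group(1)
    if re.match(r"^\s*if not \(", line):
        # Join every is_<backend>_available() found inside the parentheses.
        return "_and_".join(re.findall(r"is_(\w+)_available\(\)", line))
    return None

assert find_backend("    if not is_torch_available():") == "torch"
assert (
    find_backend("    if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)
```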
'''simple docstring''' def lowercase__( __UpperCamelCase: int ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise TypeError('Input value must be an \'int\' type' ) SCREAMING_SNAKE_CASE : int = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
28
1
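De-obfuscated, the row above is a most-significant-bit position counter (the `isinstance(value, value)` guard presumably stood in for `isinstance(value, int)` before renaming). A cleaned, runnable version:

```python
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    # Assumes a non-negative integer; a negative input would loop forever,
    # since right-shifting a negative int never reaches zero.
    position = 0
    while number:
        position += 1
        number >>= 1
    return position

assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(8) == 4  # 0b1000 -> bit position 4
```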
'''simple docstring''' import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "nvidia/segformer-b0-finetuned-ade-512-512": ( "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Any = '''segformer''' def __init__( self, A=3, A=4, A=[2, 2, 2, 2], A=[8, 4, 2, 1], A=[32, 64, 160, 256], A=[7, 3, 3, 3], A=[4, 2, 2, 2], A=[1, 2, 5, 8], A=[4, 4, 4, 4], A="gelu", A=0.0, A=0.0, A=0.1, A=0.02, A=0.1, A=1E-6, A=256, A=255, **A, ): '''simple docstring''' super().__init__(**A ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.', A, ) SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : Optional[int] = num_encoder_blocks SCREAMING_SNAKE_CASE : int = depths SCREAMING_SNAKE_CASE : List[Any] = sr_ratios SCREAMING_SNAKE_CASE : List[Any] = hidden_sizes SCREAMING_SNAKE_CASE : List[str] = patch_sizes SCREAMING_SNAKE_CASE : str = strides SCREAMING_SNAKE_CASE : List[Any] = mlp_ratios SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : int = classifier_dropout_prob SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Any = drop_path_rate SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps SCREAMING_SNAKE_CASE : List[Any] = decoder_hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.get('reshape_last_stage', A ) SCREAMING_SNAKE_CASE : List[str] = semantic_loss_ignore_index class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Optional[int] = version.parse('''1.11''' ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return 1E-4 @property def UpperCamelCase_ ( self ): '''simple docstring''' return 12
28
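One reusable detail in the SegFormer config above is how a deprecated kwarg is accepted with a warning while the current default still wins. A simplified sketch of that pattern (names and the warning category are mine):

```python
import warnings

def resolve_reshape_last_stage(**kwargs) -> bool:
    if kwargs.get("reshape_last_stage") is False:
        warnings.warn(
            "reshape_last_stage=False is deprecated and will soon be removed; "
            "the behaviour will default to reshape_last_stage=True.",
            FutureWarning,
        )
    # Fall back to the new default when the kwarg is absent.
    return kwargs.get("reshape_last_stage", True)

print(resolve_reshape_last_stage())                          # True
print(resolve_reshape_last_stage(reshape_last_stage=False))  # False, with a warning
```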
'''simple docstring''' from typing import Dict from .base import GenericTensor, Pipeline class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ): '''simple docstring''' if tokenize_kwargs is None: SCREAMING_SNAKE_CASE : Optional[int] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) SCREAMING_SNAKE_CASE : Tuple = truncation SCREAMING_SNAKE_CASE : int = tokenize_kwargs SCREAMING_SNAKE_CASE : Optional[Any] = {} if return_tensors is not None: SCREAMING_SNAKE_CASE : Optional[int] = return_tensors return preprocess_params, {}, postprocess_params def UpperCamelCase_ ( self, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.framework SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A ) return model_inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model(**A ) return model_outputs def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self, *A, **A ): '''simple docstring''' return super().__call__(*A, **A )
28
1
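The pipeline above postprocesses by unwrapping the first model output and converting it per framework. A framework-agnostic stand-in, demonstrated with a fake tensor so the sketch runs without torch or TensorFlow:

```python
def postprocess(model_outputs, framework: str, return_tensors: bool = False):
    if return_tensors:
        return model_outputs[0]
    if framework == "pt":
        return model_outputs[0].tolist()
    if framework == "tf":
        return model_outputs[0].numpy().tolist()
    raise ValueError(f"Unsupported framework: {framework}")

class _FakeTensor(list):
    # Stands in for a torch.Tensor just enough for the "pt" branch.
    def tolist(self):
        return list(self)

print(postprocess([_FakeTensor([1.0, 2.0])], framework="pt"))  # [1.0, 2.0]
```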
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : torch.FloatTensor class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' @register_to_config def __init__( self, A = 16, A = 88, A = None, A = None, A = 1, A = 0.0, A = 32, A = None, A = False, A = None, A = "geglu", A = True, A = True, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Dict = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = attention_head_dim SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim SCREAMING_SNAKE_CASE : List[str] = in_channels SCREAMING_SNAKE_CASE : Tuple = torch.nn.GroupNorm(num_groups=A, num_channels=A, eps=1E-6, affine=A ) SCREAMING_SNAKE_CASE : Dict = nn.Linear(A, A ) # 3. Define transformers blocks SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock( A, A, A, dropout=A, cross_attention_dim=A, activation_fn=A, attention_bias=A, double_self_attention=A, norm_elementwise_affine=A, ) for d in range(A ) ] ) SCREAMING_SNAKE_CASE : Any = nn.Linear(A, A ) def UpperCamelCase_ ( self, A, A=None, A=None, A=None, A=1, A=None, A = True, ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.shape SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(A, A, A, A, A ) SCREAMING_SNAKE_CASE : Any = hidden_states.permute(0, 2, 1, 3, 4 ) SCREAMING_SNAKE_CASE : Any = self.norm(A ) SCREAMING_SNAKE_CASE : Dict = hidden_states.permute(0, 3, 4, 2, 1 ).reshape(batch_size * height * width, A, A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.proj_in(A ) # 2. Blocks for block in self.transformer_blocks: SCREAMING_SNAKE_CASE : Optional[Any] = block( A, encoder_hidden_states=A, timestep=A, cross_attention_kwargs=A, class_labels=A, ) # 3. Output SCREAMING_SNAKE_CASE : List[Any] = self.proj_out(A ) SCREAMING_SNAKE_CASE : str = ( hidden_states[None, None, :] .reshape(A, A, A, A, A ) .permute(0, 3, 4, 1, 2 ) .contiguous() ) SCREAMING_SNAKE_CASE : Any = hidden_states.reshape(A, A, A, A ) SCREAMING_SNAKE_CASE : Tuple = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=A )
28
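The core of the temporal transformer above is a reshuffle that turns video latents of shape (batch * frames, C, H, W) into per-pixel time sequences so attention runs along the frame axis. A NumPy sketch of just that reshape:

```python
import numpy as np

def to_temporal_sequences(hidden_states: np.ndarray, num_frames: int) -> np.ndarray:
    batch_frames, channels, height, width = hidden_states.shape
    batch_size = batch_frames // num_frames
    x = hidden_states.reshape(batch_size, num_frames, channels, height, width)
    x = x.transpose(0, 2, 1, 3, 4)  # (B, C, F, H, W), as after the permute in the row
    x = x.transpose(0, 3, 4, 2, 1)  # (B, H, W, F, C)
    return x.reshape(batch_size * height * width, num_frames, channels)

out = to_temporal_sequences(np.zeros((8, 4, 2, 2)), num_frames=4)
print(out.shape)  # (8, 4, 4): 2 * 2 * 2 pixel positions, 4 frames, 4 channels
```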
'''simple docstring''' from __future__ import annotations import queue class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = data SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[str] = None def lowercase__( ): """simple docstring""" print('\n********Press N to stop entering at any point of time********\n' ) SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower() SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) ) q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: " SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = left_node q.put(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: " SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = right_node q.put(__UpperCamelCase ) raise def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return print(node.data ,end=',' ) pre_order(node.left ) pre_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return in_order(node.left ) print(node.data ,end=',' ) in_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data ,end=',' ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Optional[int] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Union[str, Any] = [] while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__UpperCamelCase ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[Any] = node while n or stack: while n: # start from root node, find its left child print(n.data ,end=',' ) stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE : List[Any] = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE : 
Any = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : int = node while n or stack: while n: stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = n.left SCREAMING_SNAKE_CASE : Tuple = stack.pop() print(n.data ,end=',' ) SCREAMING_SNAKE_CASE : str = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], [] SCREAMING_SNAKE_CASE : Optional[int] = node stacka.append(__UpperCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__UpperCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data ,end=',' ) def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ): """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 ) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) UpperCamelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
28
1
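The traversal row above is heavily renamed (`isinstance(node, node)` presumably began life as `isinstance(node, TreeNode)`). As a sanity check, here is its iterative in-order traversal, cleaned up and runnable:

```python
from __future__ import annotations

class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None

def in_order_iter(node: TreeNode | None) -> None:
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:              # descend to the leftmost unvisited node
            stack.append(n)
            n = n.left
        n = stack.pop()       # visit it
        print(n.data, end=",")
        n = n.right           # then walk its right subtree

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
in_order_iter(root)  # prints 2,1,3,
```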
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _a : '''simple docstring''' def __init__( self, A, A=13, A=7, A=True, A=True, A=True, A=True, A=99, A=32, A=5, A=4, A=37, A="gelu", A=0.1, A=0.1, A=128, A=32, A=16, A=2, A=0.02, A=3, A=4, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : List[Any] = seq_length SCREAMING_SNAKE_CASE : List[Any] = is_training SCREAMING_SNAKE_CASE : Any = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : int = use_labels SCREAMING_SNAKE_CASE : Tuple = vocab_size SCREAMING_SNAKE_CASE : Dict = hidden_size SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : Any = intermediate_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Any = max_position_embeddings SCREAMING_SNAKE_CASE : Dict = type_vocab_size SCREAMING_SNAKE_CASE : int = type_sequence_label_size SCREAMING_SNAKE_CASE : Any = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE : str = num_choices SCREAMING_SNAKE_CASE : str = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE : int = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : int = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size], self.num_choices ) SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self ): '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self ): '''simple docstring''' ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = NezhaModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[Any] = model(A, attention_mask=A, token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = model(A, token_type_ids=A ) SCREAMING_SNAKE_CASE : Dict = model(A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A, A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Union[str, Any] = NezhaModel(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model( A, attention_mask=A, token_type_ids=A, encoder_hidden_states=A, encoder_attention_mask=A, ) SCREAMING_SNAKE_CASE : List[str] = model( A, attention_mask=A, token_type_ids=A, encoder_hidden_states=A, ) SCREAMING_SNAKE_CASE : Tuple = model(A, attention_mask=A, token_type_ids=A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = NezhaForMaskedLM(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[Any] = model(A, attention_mask=A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = NezhaForNextSentencePrediction(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Dict = model( A, attention_mask=A, token_type_ids=A, labels=A, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = NezhaForPreTraining(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : int = model( A, attention_mask=A, token_type_ids=A, labels=A, next_sentence_label=A, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = NezhaForQuestionAnswering(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Union[str, Any] = model( A, attention_mask=A, 
token_type_ids=A, start_positions=A, end_positions=A, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : Dict = NezhaForSequenceClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A, attention_mask=A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.num_labels SCREAMING_SNAKE_CASE : Union[str, Any] = NezhaForTokenClassification(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : int = model(A, attention_mask=A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.num_choices SCREAMING_SNAKE_CASE : str = NezhaForMultipleChoice(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() SCREAMING_SNAKE_CASE : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() SCREAMING_SNAKE_CASE : Tuple = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() SCREAMING_SNAKE_CASE : List[Any] = model( A, attention_mask=A, token_type_ids=A, labels=A, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) A : List[str] = ( { '''feature-extraction''': NezhaModel, '''fill-mask''': NezhaForMaskedLM, '''question-answering''': NezhaForQuestionAnswering, '''text-classification''': NezhaForSequenceClassification, '''token-classification''': NezhaForTokenClassification, '''zero-shot''': NezhaForSequenceClassification, } if is_torch_available() else {} ) A : List[str] = True def UpperCamelCase_ ( self, A, A, A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = super()._prepare_for_class(A, A, return_labels=A ) if return_labels: if model_class in get_values(A ): SCREAMING_SNAKE_CASE : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=A ) SCREAMING_SNAKE_CASE : str = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) return inputs_dict def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = 
NezhaModelTester(self ) SCREAMING_SNAKE_CASE : Any = ConfigTester(self, config_class=A, hidden_size=37 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() SCREAMING_SNAKE_CASE : int = None self.model_tester.create_and_check_model_as_decoder( A, A, A, A, A, A, A, A, A, ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[int] = NezhaModel.from_pretrained(A ) self.assertIsNotNone(A ) @slow @require_torch_gpu def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return SCREAMING_SNAKE_CASE : int = True SCREAMING_SNAKE_CASE : Optional[int] = model_class(config=A ) SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(A, A ) SCREAMING_SNAKE_CASE : Tuple = torch.jit.trace( A, (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A, os.path.join(A, 'bert.pt' ) ) SCREAMING_SNAKE_CASE : int = torch.jit.load(os.path.join(A, 'bert.pt' ), map_location=A ) loaded(inputs_dict['input_ids'].to(A ), inputs_dict['attention_mask'].to(A ) ) @require_torch class _a ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = model(A, attention_mask=A )[0] SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 6, 768) ) self.assertEqual(output.shape, A ) SCREAMING_SNAKE_CASE : Any = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[int] = model(A, attention_mask=A )[0] SCREAMING_SNAKE_CASE : Any = torch.Size((1, 6, 21_128) ) self.assertEqual(output.shape, A ) SCREAMING_SNAKE_CASE : str = torch.tensor( [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], A, atol=1E-4 ) )
28
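The GPU test near the end of the row above traces a model with TorchScript, saves it, reloads it, and calls it. A minimal sketch of that round trip with a stand-in module (torch assumed installed):

```python
import os
import tempfile

import torch

model = torch.nn.Linear(4, 2).eval()
example = torch.zeros(1, 4)
traced = torch.jit.trace(model, example)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
    # The traced graph should reproduce the eager module's output.
    assert torch.allclose(model(example), loaded(example))
```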
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
28
1
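A useful piece to lift out of the VQGAN-CLIP editor above is its prompt parser, which accepts "text:weight" strings separated by `|`, (text, weight) pairs, or bare strings. A de-obfuscated sketch with the device/tensor handling dropped:

```python
def process_prompts(prompts):
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    processed, weights = [], []
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:
            text, weight_str = prompt.split(":")
            weight = float(weight_str)
        else:
            text, weight = prompt, 1.0
        processed.append(text)
        weights.append(weight)
    return {"prompts": processed, "weights": weights}

print(process_prompts("a face:1.5 | glasses:-1"))
# {'prompts': ['a face', 'glasses'], 'weights': [1.5, -1.0]}
```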
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = "▁" UpperCamelCase_ = {"vocab_file": "sentencepiece.bpe.model"} UpperCamelCase_ = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), } } UpperCamelCase_ = { "facebook/mbart-large-en-ro": 1_0_2_4, "facebook/mbart-large-cc25": 1_0_2_4, } # fmt: off UpperCamelCase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = VOCAB_FILES_NAMES A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : List[str] = PRETRAINED_VOCAB_FILES_MAP A : Dict = ['''input_ids''', '''attention_mask'''] A : List[int] = [] A : List[int] = [] def __init__( self, A, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", A=None, A=None, A=None, A = None, A=None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(A, lstrip=A, rstrip=A ) if isinstance(A, A ) else mask_token SCREAMING_SNAKE_CASE : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A, eos_token=A, unk_token=A, sep_token=A, cls_token=A, pad_token=A, mask_token=A, tokenizer_file=A, src_lang=A, tgt_lang=A, additional_special_tokens=A, sp_model_kwargs=self.sp_model_kwargs, **A, ) SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) SCREAMING_SNAKE_CASE : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE : Optional[Any] = 1 SCREAMING_SNAKE_CASE : Dict = len(self.sp_model ) SCREAMING_SNAKE_CASE : Optional[Any] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A ) } SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()} SCREAMING_SNAKE_CASE : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} SCREAMING_SNAKE_CASE : Tuple = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) SCREAMING_SNAKE_CASE : Dict = src_lang if src_lang is not None else 'en_XX' SCREAMING_SNAKE_CASE : Tuple = self.lang_code_to_id[self._src_lang] SCREAMING_SNAKE_CASE : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.__dict__.copy() SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : str = self.sp_model.serialized_model_proto() return state def __setstate__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): SCREAMING_SNAKE_CASE : Tuple = {} SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCamelCase_ ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase_ ( self, A, A = None, A = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A, token_ids_a=A, already_has_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * len(self.prefix_tokens ) SCREAMING_SNAKE_CASE : int = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(A )) + suffix_ones return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self, A, A, A, A, **A ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : List[str] = self(A, add_special_tokens=A, return_tensors=A, **A ) SCREAMING_SNAKE_CASE : Any = self.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : int = tgt_lang_id return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.encode(A, out_type=A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[Any] = self.sp_model.PieceToId(A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase_ ( 
self, A ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = ''.join(A ).replace(A, ' ' ).strip() return out_string def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' if not os.path.isdir(A ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, A ) elif not os.path.isfile(self.vocab_file ): with open(A, 'wb' ) as fi: SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self, A, A = "en_XX", A = None, A = "ro_RO", **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = src_lang SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(A, A, **A ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.lang_code_to_id[src_lang] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = [self.eos_token_id, self.cur_lang_code] def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.lang_code_to_id[lang] SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Dict = [self.eos_token_id, self.cur_lang_code]
28
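The trickiest part of the MBart tokenizer above is the fairseq/SentencePiece alignment: four special tokens are pinned, every SentencePiece id is shifted by a fixed offset, and SPM's id 0 (its `<unk>`) is remapped. A standalone sketch of that lookup:

```python
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # value used by the tokenizer above

def token_to_id(token: str, sp_piece_to_id) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = sp_piece_to_id(token)
    # SentencePiece returns 0 for unknown pieces; map those to <unk>.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]

print(token_to_id("<pad>", lambda t: 0))   # 1
print(token_to_id("hello", lambda t: 41))  # 42
print(token_to_id("zzzzz", lambda t: 0))   # 3 (unknown)
```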
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A ) def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet( A, A, A, A, A, A, A, A, A, A, A, ) # merge samples if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample else: SCREAMING_SNAKE_CASE : str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(A, A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = save_directory for controlnet in self.nets: controlnet.save_pretrained( A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, ) idx += 1 SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}" @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path while os.path.isdir(A ): SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A ) controlnets.append(A ) idx += 1 SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}" logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." ) if len(A ) == 0: raise ValueError( F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(A )
28
1
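The multi-ControlNet wrapper above saves sub-models under `<dir>`, `<dir>_1`, `<dir>_2`, ... and loads them back by walking the same sequence until a directory is missing. The naming walk in isolation:

```python
import os

def candidate_paths(base: str):
    """Yield existing sub-model directories: base, base_1, base_2, ..."""
    idx, path = 0, base
    while os.path.isdir(path):
        yield path
        idx += 1
        path = f"{base}_{idx}"

# e.g. list(candidate_paths("./mydirectory/controlnet")) -> every controlnet dir to load
```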
'''simple docstring''' import numpy as np import datasets UpperCamelCase_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n" UpperCamelCase_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n" UpperCamelCase_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'X': datasets.Sequence(datasets.Value('float', id='sequence' ), id='X' ), } ), ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = np.array(A ) SCREAMING_SNAKE_CASE : Tuple = np.array(A ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('Expected `X` to be a 2D vector' ) if len(reference_distribution.shape ) != 2: raise ValueError('Expected `reference_distribution` to be a 2D vector' ) if reference_distribution.shape[0] < 2: raise ValueError( 'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' ) # Get mahalanobis distance for each prediction SCREAMING_SNAKE_CASE : Any = X - np.mean(A ) SCREAMING_SNAKE_CASE : str = np.cov(reference_distribution.T ) try: SCREAMING_SNAKE_CASE : str = np.linalg.inv(A ) except np.linalg.LinAlgError: SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.pinv(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(A, A ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(A, X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
28
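Stripped of the `datasets.Metric` scaffolding, the computation in the row above is a few lines of NumPy. Note that it subtracts the scalar mean of the whole reference array, where the textbook Mahalanobis distance would use a per-feature mean (axis=0); this sketch keeps the row's behavior, including its pinv fallback for singular covariances:

```python
import numpy as np

def mahalanobis(X, reference_distribution):
    X = np.asarray(X, dtype=float)
    ref = np.asarray(reference_distribution, dtype=float)
    X_minus_mu = X - ref.mean()  # scalar mean, as in the row above
    cov = np.cov(ref.T)
    try:
        inv_cov = np.linalg.inv(cov)
    except np.linalg.LinAlgError:
        inv_cov = np.linalg.pinv(cov)  # singular covariance fallback
    left = X_minus_mu @ inv_cov
    return (left @ X_minus_mu.T).diagonal()

print(mahalanobis([[0, 1]], [[0, 1], [1, 0]]))  # [0.5], matching the docstring example
```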
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
28
1
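For reference, the core of the metric in this record reduces to a few lines of plain NumPy. The standalone sketch below reproduces the docstring example ({'mahalanobis': array([0.5])}) without the `datasets` dependency; unlike the original, it uses `np.linalg.pinv` directly instead of trying `inv` first.

import numpy as np

def mahalanobis(X, reference_distribution):
    X = np.asarray(X, dtype=float)
    reference_distribution = np.asarray(reference_distribution, dtype=float)
    # center X on the mean of the reference distribution
    X_minus_mu = X - np.mean(reference_distribution)
    cov = np.cov(reference_distribution.T)
    inv_covmat = np.linalg.pinv(cov)  # pseudo-inverse tolerates singular covariance
    left_term = np.dot(X_minus_mu, inv_covmat)
    return np.dot(left_term, X_minus_mu.T).diagonal()

print(mahalanobis(X=[[0, 1]], reference_distribution=[[0, 1], [1, 0]]))  # [0.5]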
def longest_common_subsequence(x: str, y: str):
    """Find the longest common subsequence of two strings and return (length, subsequence)."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # walk the table backwards to recover one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
28
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # build an undirected adjacency list: node -> list of [neighbor, cost]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
28
1
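The test above imports `prisms_algorithm` from a module that is not part of this section. A minimal heap-based Prim's implementation compatible with the test's adjacency-list format and edge-set assertions might look like the following; this is a hypothetical stand-in, not the imported module's actual code.

import heapq

def prim_mst(adjacency, start=0):
    visited = {start}
    # heap of (cost, u, v) for edges leaving the visited set
    heap = [(cost, start, neighbor) for neighbor, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = set()
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale entry: v was reached via a cheaper edge
        visited.add(v)
        mst_edges.add((u, v))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst_edges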
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
28
1
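The `_LazyModule` pattern in the record above defers importing heavy submodules until one of their attributes is first accessed. As a rough illustration of the idea only (a simplified sketch, not transformers' actual `_LazyModule` implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute name -> the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the defining submodule on first access, then cache the result
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # so __getattr__ runs at most once per name
        return value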
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("transformers.models.speecht5") UpperCamelCase_ = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } UpperCamelCase_ = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } UpperCamelCase_ = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } UpperCamelCase_ = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } UpperCamelCase_ = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } UpperCamelCase_ = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } UpperCamelCase_ = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": 
"speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } UpperCamelCase_ = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } UpperCamelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCamelCase_ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase_ = [] UpperCamelCase_ = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] UpperCamelCase_ = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] UpperCamelCase_ = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] UpperCamelCase_ = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowercase__( __UpperCamelCase: List[str] ,__UpperCamelCase: List[str] ,__UpperCamelCase: Tuple ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Tuple ): """simple docstring""" for attribute in key.split('.' ): SCREAMING_SNAKE_CASE : List[str] = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: SCREAMING_SNAKE_CASE : List[str] = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: SCREAMING_SNAKE_CASE : Optional[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": SCREAMING_SNAKE_CASE : int = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE : Dict = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE : List[str] = value elif weight_type == "bias": SCREAMING_SNAKE_CASE : str = value elif weight_type == "running_mean": SCREAMING_SNAKE_CASE : Union[str, Any] = value elif weight_type == "running_var": SCREAMING_SNAKE_CASE : Union[str, Any] = value elif weight_type == "num_batches_tracked": SCREAMING_SNAKE_CASE : Optional[Any] = value else: SCREAMING_SNAKE_CASE : Any = value logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." ) def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: str ): """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Any ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [] if task == "s2t": SCREAMING_SNAKE_CASE : int = hf_model.speechta.encoder.prenet.feature_encoder SCREAMING_SNAKE_CASE : str = MAPPING_S2T SCREAMING_SNAKE_CASE : List[str] = IGNORE_KEYS_S2T elif task == "t2s": SCREAMING_SNAKE_CASE : Dict = None SCREAMING_SNAKE_CASE : Tuple = MAPPING_T2S SCREAMING_SNAKE_CASE : str = IGNORE_KEYS_T2S elif task == "s2s": SCREAMING_SNAKE_CASE : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder SCREAMING_SNAKE_CASE : List[str] = MAPPING_S2S SCREAMING_SNAKE_CASE : Optional[Any] = IGNORE_KEYS_S2S else: raise ValueError(f"Unsupported task: {task}" ) for name, value in fairseq_dict.items(): if should_ignore(__UpperCamelCase ,__UpperCamelCase ): logger.info(f"{name} was ignored" ) continue SCREAMING_SNAKE_CASE : str = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == 'group' ,) SCREAMING_SNAKE_CASE : Optional[int] = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = key.split('.*.' ) if prefix in name and suffix in name: SCREAMING_SNAKE_CASE : Optional[int] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: SCREAMING_SNAKE_CASE : int = True if "*" in mapped_key: SCREAMING_SNAKE_CASE : Union[str, Any] = name.split(__UpperCamelCase )[0].split('.' 
)[-2] SCREAMING_SNAKE_CASE : Tuple = mapped_key.replace('*' ,__UpperCamelCase ) if "weight_g" in name: SCREAMING_SNAKE_CASE : str = 'weight_g' elif "weight_v" in name: SCREAMING_SNAKE_CASE : Any = 'weight_v' elif "bias" in name: SCREAMING_SNAKE_CASE : Any = 'bias' elif "weight" in name: SCREAMING_SNAKE_CASE : Any = 'weight' elif "running_mean" in name: SCREAMING_SNAKE_CASE : Tuple = 'running_mean' elif "running_var" in name: SCREAMING_SNAKE_CASE : Optional[Any] = 'running_var' elif "num_batches_tracked" in name: SCREAMING_SNAKE_CASE : Dict = 'num_batches_tracked' else: SCREAMING_SNAKE_CASE : Tuple = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f"Unused weights: {unused_weights}" ) def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Dict ,__UpperCamelCase: List[Any] ,__UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : str = full_name.split('conv_layers.' )[-1] SCREAMING_SNAKE_CASE : int = name.split('.' ) SCREAMING_SNAKE_CASE : Any = int(items[0] ) SCREAMING_SNAKE_CASE : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) SCREAMING_SNAKE_CASE : str = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) SCREAMING_SNAKE_CASE : Union[str, Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) SCREAMING_SNAKE_CASE : Union[str, Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) SCREAMING_SNAKE_CASE : List[Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: int=None ,__UpperCamelCase: Optional[Any]=None ,__UpperCamelCase: int=None ,): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : Any = SpeechTaConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaConfig() if task == "s2t": SCREAMING_SNAKE_CASE : Optional[Any] = config.max_text_positions SCREAMING_SNAKE_CASE : List[Any] = SpeechTaForSpeechToText(__UpperCamelCase ) elif task == "t2s": SCREAMING_SNAKE_CASE : Optional[Any] = 18_76 SCREAMING_SNAKE_CASE : Optional[int] = 6_00 SCREAMING_SNAKE_CASE : List[Any] = config.max_speech_positions SCREAMING_SNAKE_CASE : List[str] = SpeechTaForTextToSpeech(__UpperCamelCase ) elif task == "s2s": SCREAMING_SNAKE_CASE : Optional[int] = 18_76 SCREAMING_SNAKE_CASE : int = config.max_speech_positions SCREAMING_SNAKE_CASE : Any = SpeechTaForSpeechToSpeech(__UpperCamelCase ) else: raise ValueError(f"Unknown task name: {task}" ) if vocab_path: SCREAMING_SNAKE_CASE : Any = SpeechTaTokenizer(__UpperCamelCase ,model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE : str = AddedToken('<mask>' ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE : Dict = SpeechTaProcessor(tokenizer=__UpperCamelCase ,feature_extractor=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = torch.load(__UpperCamelCase ) recursively_load_weights(fairseq_checkpoint['model'] ,__UpperCamelCase ,__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) processor.push_to_hub(__UpperCamelCase ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) UpperCamelCase_ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
28
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
28
1
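The totient sieve in the record above can be sanity-checked against a direct gcd count for small limits. A test-only sketch (O(n^2), for verification rather than production use):

from math import gcd

def phi_direct(n):
    # phi(n) by definition: count of 1 <= k <= n coprime to n
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

def phi_sieve(limit):
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return phi

phi = phi_sieve(100)
assert all(phi[n] == phi_direct(n) for n in range(2, 101))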
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
28
1
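The toy vocab/merges files written in the tokenizer test's `setUp` drive byte-level BPE. The following simplified sketch (not the actual GPT-2/Longformer tokenizer code) shows how ranked merge rules greedily collapse a pre-tokenized word such as '\u0120lower' into the expected pieces '\u0120low' + 'er':

def bpe(word, merge_ranks):
    symbols = list(word)
    while len(symbols) > 1:
        # find the adjacent pair with the best (lowest) merge rank
        pairs = [
            (merge_ranks.get((a, b), float("inf")), i)
            for i, (a, b) in enumerate(zip(symbols, symbols[1:]))
        ]
        rank, i = min(pairs)
        if rank == float("inf"):
            break  # no applicable merge rule left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

# same merge table as the test fixture (minus the version header)
merges = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]
merge_ranks = {pair: rank for rank, pair in enumerate(merges)}
print(bpe("\u0120lower", merge_ranks))  # ['\u0120low', 'er']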
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime expressible as a difference of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        # (n + 1)**3 - n**3 grows by 6 * (n + 1) at each step
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
28
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = StableDiffusionXLImgaImgPipeline A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} A : str = PipelineTesterMixin.required_optional_params - {'''latents'''} A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, ) SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler( beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, ) SCREAMING_SNAKE_CASE : int = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A ) SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 
'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) # forward without prompt embeds SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']] SCREAMING_SNAKE_CASE : int = sd_pipe(**A ) SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )] ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe( **A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, ) SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A ) SCREAMING_SNAKE_CASE : str = pipe(**A ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
28
1
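The trial-division `is_prime` from the prime-counting record above can be cross-checked against a Sieve of Eratosthenes for small inputs. This test-only sketch adds an explicit `number > 1` guard, which the original omits because its candidates start at 7:

from math import isqrt

def is_prime(number: int) -> bool:
    return number > 1 and all(number % d != 0 for d in range(2, isqrt(number) + 1))

def sieve(limit: int):
    # flags[n] is True iff n is prime, for 0 <= n < limit
    flags = [True] * limit
    flags[0] = flags[1] = False
    for i in range(2, isqrt(limit - 1) + 1):
        if flags[i]:
            flags[i * i :: i] = [False] * len(flags[i * i :: i])
    return flags

flags = sieve(1_000)
assert all(is_prime(n) == flags[n] for n in range(1_000))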
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
28
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
28
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging UpperCamelCase_ = logging.get_logger(__name__) def lowercase__( __UpperCamelCase: Union[tf.Tensor, np.ndarray] ): """simple docstring""" if isinstance(__UpperCamelCase ,np.ndarray ): return list(tensor.shape ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.shape(__UpperCamelCase ) if tensor.shape == tf.TensorShape(__UpperCamelCase ): return dynamic SCREAMING_SNAKE_CASE : Union[str, Any] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(__UpperCamelCase )] def lowercase__( __UpperCamelCase: tf.Tensor ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[str] = None ): """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9 ,axis=__UpperCamelCase ,name=__UpperCamelCase ) def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: str ,__UpperCamelCase: Tuple ,__UpperCamelCase: List[Any]=1e-5 ,__UpperCamelCase: Dict=-1 ): """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' ) # Get mean and variance on the axis to be normalized SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = tf.nn.moments(__UpperCamelCase ,axes=[axis] ,keepdims=__UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis SCREAMING_SNAKE_CASE : Any = [1] * inputs.shape.rank SCREAMING_SNAKE_CASE : str = shape_list(__UpperCamelCase )[axis] SCREAMING_SNAKE_CASE : Optional[int] = tf.reshape(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = tf.reshape(__UpperCamelCase ,__UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. SCREAMING_SNAKE_CASE : Optional[int] = tf.nn.batch_normalization( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,offset=__UpperCamelCase ,scale=__UpperCamelCase ,variance_epsilon=__UpperCamelCase ,) return outputs def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str=0 ,__UpperCamelCase: Optional[Any]=-1 ): """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input SCREAMING_SNAKE_CASE : Union[str, Any] = tf.shape(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) SCREAMING_SNAKE_CASE : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 ) return tf.reshape(__UpperCamelCase ,__UpperCamelCase ) def lowercase__( __UpperCamelCase: tf.Tensor ): """simple docstring""" if not isinstance(__UpperCamelCase ,tf.Tensor ): SCREAMING_SNAKE_CASE : Any = tf.convert_to_tensor(__UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: SCREAMING_SNAKE_CASE : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: SCREAMING_SNAKE_CASE : Dict = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) SCREAMING_SNAKE_CASE : Optional[int] = ( tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__( __UpperCamelCase: tf.Tensor ,__UpperCamelCase: int ,__UpperCamelCase: str = "input_ids" ): """simple docstring""" tf.debugging.assert_less( __UpperCamelCase ,tf.cast(__UpperCamelCase ,dtype=tensor.dtype ) ,message=( f"The maximum value of {tensor_name} ({tf.math.reduce_max(__UpperCamelCase )}) must be smaller than the embedding " f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ) ,) def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. SCREAMING_SNAKE_CASE : int = [x for x in data if len(__UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( 'The following attributes cannot be saved to HDF5 file because ' f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) SCREAMING_SNAKE_CASE : List[str] = np.asarray(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : Tuple = np.array_split(__UpperCamelCase ,__UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 SCREAMING_SNAKE_CASE : List[Any] = np.array_split(__UpperCamelCase ,__UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(__UpperCamelCase ): SCREAMING_SNAKE_CASE : List[str] = chunk_data else: SCREAMING_SNAKE_CASE : str = data def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: List[str] ): """simple docstring""" if name in group.attrs: SCREAMING_SNAKE_CASE : Union[str, Any] = [n.decode('utf8' ) if hasattr(__UpperCamelCase ,'decode' ) else n for n in group.attrs[name]] else: SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : str = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('utf8' ) if hasattr(__UpperCamelCase ,'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__( __UpperCamelCase: Optional[int] ): """simple docstring""" def _expand_single_ad_tensor(__UpperCamelCase: int ): if isinstance(__UpperCamelCase ,tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(__UpperCamelCase ,axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor ,__UpperCamelCase )
28
'''simple docstring'''
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging


logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")


def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
    """simple docstring"""
    hf_model.apply_weight_norm()

    SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
    SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
    SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates ) ):
        SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
        SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
        SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
    SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
    SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()


@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
    """simple docstring"""
    if config_path is not None:
        SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
    else:
        SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()

    SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )

    SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
    load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )

    SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
    SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
    SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
    SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
    SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()

    model.save_pretrained(__UpperCamelCase )

    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(__UpperCamelCase )


if __name__ == "__main__":
    UpperCamelCase_ = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    UpperCamelCase_ = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
28
1
'''simple docstring'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup


if __name__ == "__main__":
    UpperCamelCase_ = input("Enter image url: ").strip()
    print(F"""Downloading image from {url} ...""")
    UpperCamelCase_ = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    UpperCamelCase_ = soup.find("meta", {"property": "og:image"})["content"]
    UpperCamelCase_ = requests.get(image_url).content
    UpperCamelCase_ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(F"""Done. Image saved to disk as {file_name}.""")
28
'''simple docstring'''
from typing import Any


class _a :
    '''simple docstring'''

    def __init__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = data
        SCREAMING_SNAKE_CASE : Any = None

    def __repr__( self ):
        '''simple docstring'''
        return F"Node({self.data})"


class _a :
    '''simple docstring'''

    def __init__( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = None

    def __iter__( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = self.head
        while node:
            yield node.data
            SCREAMING_SNAKE_CASE : List[str] = node.next

    def __len__( self ):
        '''simple docstring'''
        return sum(1 for _ in self )

    def __repr__( self ):
        '''simple docstring'''
        return "->".join([str(A ) for item in self] )

    def __getitem__( self, A ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None

    def __setitem__( self, A, A ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.head
        for _ in range(A ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = current.next
        SCREAMING_SNAKE_CASE : Any = data

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        self.insert_nth(len(self ), A )

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        self.insert_nth(0, A )

    def UpperCamelCase_ ( self, A, A ):
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A )
        if self.head is None:
            SCREAMING_SNAKE_CASE : Optional[int] = new_node
        elif index == 0:
            SCREAMING_SNAKE_CASE : Union[str, Any] = self.head  # link new_node to head
            SCREAMING_SNAKE_CASE : Tuple = new_node
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = self.head
            for _ in range(index - 1 ):
                SCREAMING_SNAKE_CASE : str = temp.next
            SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next
            SCREAMING_SNAKE_CASE : List[str] = new_node

    def UpperCamelCase_ ( self ):  # print every node data
        '''simple docstring'''
        print(self )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        return self.delete_nth(0 )

    def UpperCamelCase_ ( self ):  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )

    def UpperCamelCase_ ( self, A = 0 ):
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.head  # default first node
        if index == 0:
            SCREAMING_SNAKE_CASE : List[str] = self.head.next
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
            for _ in range(index - 1 ):
                SCREAMING_SNAKE_CASE : Any = temp.next
            SCREAMING_SNAKE_CASE : List[str] = temp.next
            SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next
        return delete_node.data

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        return self.head is None

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = None
        SCREAMING_SNAKE_CASE : Any = self.head

        while current:
            # Store the current node's next node.
            SCREAMING_SNAKE_CASE : Optional[int] = current.next
            # Make the current node's next point backwards
            SCREAMING_SNAKE_CASE : int = prev
            # Make the previous node be the current node
            SCREAMING_SNAKE_CASE : int = current
            # Make the current node the next node (to progress iteration)
            SCREAMING_SNAKE_CASE : List[Any] = next_node
        # Return prev in order to put the head at the end
        SCREAMING_SNAKE_CASE : List[Any] = prev


def lowercase__( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Dict = LinkedList()
    assert linked_list.is_empty() is True
    assert str(__UpperCamelCase ) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10 ):
        assert len(__UpperCamelCase ) == i
        linked_list.insert_nth(__UpperCamelCase ,i + 1 )
    assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )

    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(__UpperCamelCase ) == 9
    assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )

    assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True

    for i in range(0 ,9 ):
        SCREAMING_SNAKE_CASE : Any = -i
    assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True

    linked_list.reverse()
    assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )


def lowercase__( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = [
        -9,
        1_00,
        Node(77_34_51_12 ),
        'dlrow olleH',
        7,
        55_55,
        0,
        -1_9_2.5_5_5_5_5,
        'Hello, world!',
        7_7.9,
        Node(10 ),
        None,
        None,
        1_2.2_0,
    ]

    SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
    for i in test_input:
        linked_list.insert_tail(__UpperCamelCase )

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
    assert result == -9
    assert (
        str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
    assert result == 1_2.2
    assert (
        str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(__UpperCamelCase )
    assert (
        str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(__UpperCamelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def lowercase__( ):
    """simple docstring"""
    from doctest import testmod

    testmod()

    SCREAMING_SNAKE_CASE : Dict = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(__UpperCamelCase )
    print('\nReading/changing Node data using indexing:' )
    print(f"Element at Position 1: {linked_list[1]}" )
    SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(__UpperCamelCase )
    print(f"length of linked_list is : {len(__UpperCamelCase )}" )


if __name__ == "__main__":
    main()
28
1
'''simple docstring'''
from __future__ import annotations


def lowercase__( __UpperCamelCase: str ):
    """simple docstring"""
    return [ord(__UpperCamelCase ) - 96 for elem in plain]


def lowercase__( __UpperCamelCase: list[int] ):
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )


def lowercase__( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[int] = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' ,__UpperCamelCase )
    print('Decoded:' ,decode(__UpperCamelCase ) )


if __name__ == "__main__":
    main()
28
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
28
1
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp


class _a ( nn.Module ):
    '''simple docstring'''
    A : int
    A : jnp.dtype = jnp.floataa

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.shape
        SCREAMING_SNAKE_CASE : Optional[int] = jax.image.resize(
            A, shape=(batch, height * 2, width * 2, channels), method='nearest', )
        SCREAMING_SNAKE_CASE : Optional[int] = self.conv(A )
        return hidden_states


class _a ( nn.Module ):
    '''simple docstring'''
    A : int
    A : jnp.dtype = jnp.floataa

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Dict = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = self.conv(A )
        return hidden_states


class _a ( nn.Module ):
    '''simple docstring'''
    A : int
    A : int = None
    A : float = 0.0
    A : bool = None
    A : jnp.dtype = jnp.floataa

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = self.in_channels if self.out_channels is None else self.out_channels

        SCREAMING_SNAKE_CASE : Optional[Any] = nn.GroupNorm(num_groups=32, epsilon=1E-5 )
        SCREAMING_SNAKE_CASE : int = nn.Conv(
            A, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        SCREAMING_SNAKE_CASE : Tuple = nn.Dense(A, dtype=self.dtype )

        SCREAMING_SNAKE_CASE : Dict = nn.GroupNorm(num_groups=32, epsilon=1E-5 )
        SCREAMING_SNAKE_CASE : Any = nn.Dropout(self.dropout_prob )
        SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv(
            A, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        SCREAMING_SNAKE_CASE : List[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        SCREAMING_SNAKE_CASE : Tuple = None
        if use_nin_shortcut:
            SCREAMING_SNAKE_CASE : str = nn.Conv(
                A, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype, )

    def __call__( self, A, A, A=True ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = hidden_states

        SCREAMING_SNAKE_CASE : Optional[Any] = self.norma(A )
        SCREAMING_SNAKE_CASE : Optional[Any] = nn.swish(A )
        SCREAMING_SNAKE_CASE : Tuple = self.conva(A )

        SCREAMING_SNAKE_CASE : Optional[Any] = self.time_emb_proj(nn.swish(A ) )
        SCREAMING_SNAKE_CASE : int = jnp.expand_dims(jnp.expand_dims(A, 1 ), 1 )
        SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states + temb

        SCREAMING_SNAKE_CASE : Optional[Any] = self.norma(A )
        SCREAMING_SNAKE_CASE : List[str] = nn.swish(A )
        SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(A, A )
        SCREAMING_SNAKE_CASE : Any = self.conva(A )

        if self.conv_shortcut is not None:
            SCREAMING_SNAKE_CASE : str = self.conv_shortcut(A )

        return hidden_states + residual
28
'''simple docstring'''
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
28
1
'''simple docstring'''
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

UpperCamelCase_ = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase_ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase_ = {ord(char) for char in VALID_CHARS}

UpperCamelCase_ = ["the", "be", "to", "of", "and", "in", "that", "have"]


def lowercase__( __UpperCamelCase: list[int] ,__UpperCamelCase: tuple[int, ...] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : str = ""
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int
    SCREAMING_SNAKE_CASE : int

    for keychar, cipherchar in zip(cycle(__UpperCamelCase ) ,__UpperCamelCase ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(__UpperCamelCase )

    return decoded


def lowercase__( __UpperCamelCase: list[int] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : list[str] = []
    for key in product(__UpperCamelCase ,repeat=3 ):
        SCREAMING_SNAKE_CASE : List[str] = try_key(__UpperCamelCase ,__UpperCamelCase )
        if encoded is not None:
            possibles.append(__UpperCamelCase )
    return possibles


def lowercase__( __UpperCamelCase: list[str] ,__UpperCamelCase: str ):
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]


def lowercase__( __UpperCamelCase: str = "p059_cipher.txt" ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : list[int]
    SCREAMING_SNAKE_CASE : list[str]
    SCREAMING_SNAKE_CASE : str
    SCREAMING_SNAKE_CASE : str
    SCREAMING_SNAKE_CASE : str = Path(__UpperCamelCase ).parent.joinpath(__UpperCamelCase ).read_text(encoding='utf-8' )

    SCREAMING_SNAKE_CASE : Tuple = [int(__UpperCamelCase ) for number in data.strip().split(',' )]

    SCREAMING_SNAKE_CASE : Dict = filter_valid_chars(__UpperCamelCase )
    for common_word in COMMON_WORDS:
        SCREAMING_SNAKE_CASE : List[Any] = filter_common_word(__UpperCamelCase ,__UpperCamelCase )
        if len(__UpperCamelCase ) == 1:
            break

    SCREAMING_SNAKE_CASE : Optional[int] = possibles[0]
    return sum(ord(__UpperCamelCase ) for char in decoded_text )


if __name__ == "__main__":
    print(F"""{solution() = }""")
28
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
28
1
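The record above checks MobileViT's hidden states stage by stage: each of the five feature maps halves the spatial resolution, and the divisor left over after the loop recovers the output stride. A minimal pure-Python sketch of that bookkeeping (no torch; the 64-pixel input size is an illustrative value, not the test's actual config):

def expected_hidden_state_sizes(image_size: int, num_stages: int = 5) -> tuple:
    # stage i has spatial size image_size // 2**(i + 1)
    sizes, divisor = [], 2
    for _ in range(num_stages):
        sizes.append(image_size // divisor)
        divisor *= 2
    return sizes, divisor // 2  # second value is the implied output stride

# a 64x64 input yields maps of 32, 16, 8, 4, 2 and an output stride of 32
print(expected_hidden_state_sizes(64))  # ([32, 16, 8, 4, 2], 32)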
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin UpperCamelCase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class _a ( unittest.TestCase , SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = load_tool('text-question-answering' ) self.tool.setup() SCREAMING_SNAKE_CASE : Optional[Any] = load_tool('text-question-answering', remote=A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.tool(A, 'What did Hugging Face do in April 2021?' ) self.assertEqual(A, 'launched the BigScience Research Workshop' ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.remote_tool(A, 'What did Hugging Face do in April 2021?' ) self.assertEqual(A, 'launched the BigScience Research Workshop' ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.tool(text=A, question='What did Hugging Face do in April 2021?' ) self.assertEqual(A, 'launched the BigScience Research Workshop' ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.remote_tool(text=A, question='What did Hugging Face do in April 2021?' ) self.assertEqual(A, 'launched the BigScience Research Workshop' )
28
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
28
1
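The DistilBERT tokenizer context above builds token type ids as zeros over the first segment ([CLS] A [SEP]) and ones over the second (B [SEP]). A self-contained sketch of that rule (the cls/sep id values here are made-up placeholders, not the tokenizer's real vocabulary):

def token_type_ids(ids_a, ids_b=None, cls_id=101, sep_id=102):
    # hypothetical cls_id/sep_id values, for illustration only
    first = [cls_id] + ids_a + [sep_id]
    if ids_b is None:
        return [0] * len(first)
    second = ids_b + [sep_id]
    return [0] * len(first) + [1] * len(second)

assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]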
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record UpperCamelCase_ = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n" UpperCamelCase_ = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n" UpperCamelCase_ = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 
'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Any ): """simple docstring""" return float((preds == labels).mean() ) def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: Union[str, Any]="binary" ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = simple_accuracy(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = float(fa_score(y_true=__UpperCamelCase ,y_pred=__UpperCamelCase ,average=__UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = {} for id_pred, label in zip(__UpperCamelCase ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : str = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}" SCREAMING_SNAKE_CASE : Any = id_pred['prediction'] if question_id in question_map: question_map[question_id].append((pred, label) ) else: SCREAMING_SNAKE_CASE : Optional[Any] = [(pred, label)] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = [], [] for question, preds_labels in question_map.items(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = zip(*__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = fa_score(y_true=__UpperCamelCase ,y_pred=__UpperCamelCase ,average='macro' ) fas.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) ) ems.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Optional[int] = sum(__UpperCamelCase ) / len(__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = float(fa_score(y_true=__UpperCamelCase ,y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), codebase_urls=[], reference_urls=[], format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None, ) def UpperCamelCase_ ( self ): '''simple docstring''' if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "prediction_text": datasets.Value('string' ), }, "references": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "answers": datasets.Sequence(datasets.Value('string' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('int64' ), "paragraph": datasets.Value('int64' ), "question": datasets.Value('int64' ), }, "prediction": 
datasets.Value('int64' ), }, "references": datasets.Value('int64' ), } else: return { "predictions": datasets.Value('int64' ), "references": datasets.Value('int64' ), } def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(A, A )} elif self.config_name == "cb": return acc_and_fa(A, A, fa_avg='macro' ) elif self.config_name == "record": SCREAMING_SNAKE_CASE : List[Any] = [ { 'qas': [ {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]} for ref in references ] } ] SCREAMING_SNAKE_CASE : Any = {pred['idx']['query']: pred['prediction_text'] for pred in predictions} return evaluate_record(A, A )[0] elif self.config_name == "multirc": return evaluate_multirc(A, A ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(A, A )} else: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
28
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
28
1
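The MultiRC branch of the metric above buckets answers by (paragraph, question) and counts a question as an exact match only when every one of its answers is predicted correctly. A small pure-Python sketch of that grouping (no sklearn, and simplified relative to the full metric, which also computes per-question macro F1):

from collections import defaultdict

def multirc_exact_match(id_preds, labels):
    buckets = defaultdict(list)
    for id_pred, label in zip(id_preds, labels):
        key = (id_pred["idx"]["paragraph"], id_pred["idx"]["question"])
        buckets[key].append(id_pred["prediction"] == label)
    return sum(all(v) for v in buckets.values()) / len(buckets)

preds = [
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
    {"idx": {"paragraph": 0, "question": 1}, "prediction": 1},
]
print(multirc_exact_match(preds, [1, 1, 1]))  # 0.5: question 0 has one wrong answer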
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Union[str, Any] = None A : int = None @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(A, 'feature_size' ) ) self.assertTrue(hasattr(A, 'sampling_rate' ) ) self.assertTrue(hasattr(A, 'padding_value' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Tuple = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(A ) == len(A ) for x, y in zip(A, processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A ) SCREAMING_SNAKE_CASE : int = BatchFeature({input_name: speech_inputs}, tensor_type='np' ) SCREAMING_SNAKE_CASE : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : int = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : str = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Dict = BatchFeature({input_name: speech_inputs}, tensor_type='pt' ) SCREAMING_SNAKE_CASE : int = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A ) SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Tuple = BatchFeature({input_name: speech_inputs}, tensor_type='tf' ) SCREAMING_SNAKE_CASE : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : Tuple = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def UpperCamelCase_ ( self, A=False ): '''simple docstring''' def _inputs_have_equal_length(A ): SCREAMING_SNAKE_CASE : Union[str, Any] = len(input[0] ) for input_slice in input[1:]: if len(A ) != length: return False return True def _inputs_are_equal(A, A ): if len(A ) != len(A ): return False for input_slice_a, input_slice_a in zip(A, A ): if not 
np.allclose(np.asarray(A ), np.asarray(A ), atol=1E-3 ): return False return True SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : List[str] = self.feat_extract_tester.prepare_inputs_for_common(numpify=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : int = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : int = self.feat_extract_tester.seq_length_diff SCREAMING_SNAKE_CASE : Optional[Any] = self.feat_extract_tester.max_seq_length + pad_diff SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_tester.min_seq_length SCREAMING_SNAKE_CASE : Optional[int] = self.feat_extract_tester.batch_size SCREAMING_SNAKE_CASE : List[str] = self.feat_extract_tester.feature_size # test padding for List[int] + numpy SCREAMING_SNAKE_CASE : Tuple = feat_extract.pad(A, padding=A ) SCREAMING_SNAKE_CASE : List[Any] = input_a[input_name] SCREAMING_SNAKE_CASE : Tuple = feat_extract.pad(A, padding='longest' ) SCREAMING_SNAKE_CASE : Optional[int] = input_a[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad(A, padding='max_length', max_length=len(speech_inputs[-1] ) ) SCREAMING_SNAKE_CASE : Optional[Any] = input_a[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad(A, padding='longest', return_tensors='np' ) SCREAMING_SNAKE_CASE : List[Any] = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(A ): feat_extract.pad(A, padding='max_length' )[input_name] SCREAMING_SNAKE_CASE : List[str] = feat_extract.pad( A, padding='max_length', max_length=A, return_tensors='np' ) SCREAMING_SNAKE_CASE : Optional[Any] = input_a[input_name] self.assertFalse(_inputs_have_equal_length(A ) ) self.assertTrue(_inputs_have_equal_length(A ) ) self.assertTrue(_inputs_have_equal_length(A ) ) self.assertTrue(_inputs_are_equal(A, A ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE : Any = feat_extract.pad(A, pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE : int = input_a[input_name] SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(A, padding='longest', pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( A, padding='max_length', pad_to_multiple_of=10, max_length=A ) SCREAMING_SNAKE_CASE : List[Any] = input_a[input_name] SCREAMING_SNAKE_CASE : str = feat_extract.pad( A, padding='max_length', pad_to_multiple_of=10, max_length=A, return_tensors='np', ) SCREAMING_SNAKE_CASE : List[Any] = input_a[input_name] self.assertTrue(all(len(A ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(A, A ) ) SCREAMING_SNAKE_CASE : Dict = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(A ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2], (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct SCREAMING_SNAKE_CASE : Union[str, Any] = (np.ones(self.feat_extract_tester.feature_size ) * 
feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def UpperCamelCase_ ( self, A=False ): '''simple docstring''' def _inputs_have_equal_length(A ): SCREAMING_SNAKE_CASE : Tuple = len(input[0] ) for input_slice in input[1:]: if len(A ) != length: return False return True def _inputs_are_equal(A, A ): if len(A ) != len(A ): return False for input_slice_a, input_slice_a in zip(A, A ): if not np.allclose(np.asarray(A ), np.asarray(A ), atol=1E-3 ): return False return True SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=A ) SCREAMING_SNAKE_CASE : Dict = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) # truncate to smallest SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[0] ), truncation=A ) SCREAMING_SNAKE_CASE : Optional[int] = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.pad(A, padding='max_length', max_length=len(speech_inputs[0] ) ) SCREAMING_SNAKE_CASE : int = input_a[input_name] self.assertTrue(_inputs_have_equal_length(A ) ) self.assertFalse(_inputs_have_equal_length(A ) ) # truncate to smallest with np SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[0] ), return_tensors='np', truncation=A, ) SCREAMING_SNAKE_CASE : Any = input_a[input_name] SCREAMING_SNAKE_CASE : str = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[0] ), return_tensors='np' ) SCREAMING_SNAKE_CASE : str = input_a[input_name] self.assertTrue(_inputs_have_equal_length(A ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(A ) ) # truncate to middle SCREAMING_SNAKE_CASE : Any = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[1] ), truncation=A, return_tensors='np', ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] SCREAMING_SNAKE_CASE : Any = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[1] ), truncation=A ) SCREAMING_SNAKE_CASE : Optional[int] = input_a[input_name] SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[1] ), return_tensors='np' ) SCREAMING_SNAKE_CASE : Optional[int] = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(A ) ) self.assertTrue(_inputs_have_equal_length(A ) ) self.assertTrue(_inputs_are_equal(A, A ) ) # since truncation forces padding to be smaller than longest input # 
function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(A ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(A ): feat_extract.pad(A, truncation=A )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(A ): feat_extract.pad(A, padding='longest', truncation=A )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(A ): feat_extract.pad(A, padding='longest', truncation=A )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(A ): feat_extract.pad(A, padding='max_length', truncation=A )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Tuple = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[0] ), pad_to_multiple_of=A, truncation=A, ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad( A, padding='max_length', max_length=len(speech_inputs[0] ), pad_to_multiple_of=A, ) SCREAMING_SNAKE_CASE : List[Any] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of SCREAMING_SNAKE_CASE : Dict = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: SCREAMING_SNAKE_CASE : Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(A ) ) self.assertFalse(_inputs_have_equal_length(A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' self._check_padding(numpify=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self._check_padding(numpify=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self._check_truncation(numpify=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self._check_truncation(numpify=A ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Any = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Optional[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Any = feat_extract.pad(A, padding='longest', return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad(A, padding='longest', return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Dict = feat_extract.pad(A, padding='longest', return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad(A, padding='longest', return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def UpperCamelCase_ ( self ): '''simple 
docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.feat_extract_dict SCREAMING_SNAKE_CASE : Dict = True SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**A ) SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Union[str, Any] = [len(A ) for x in speech_inputs] SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : List[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Dict = feat_extract.pad(A, padding='longest', return_tensors='np' ) self.assertIn('attention_mask', A ) self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.feat_extract_dict SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**A ) SCREAMING_SNAKE_CASE : List[str] = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Tuple = [len(A ) for x in speech_inputs] SCREAMING_SNAKE_CASE : int = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : List[str] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Dict = min(A ) SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.pad( A, padding='max_length', max_length=A, truncation=A, return_tensors='np' ) self.assertIn('attention_mask', A ) self.assertListEqual( list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] )
28
'''simple docstring'''


class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
28
1
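One behavioral detail of the tree-sort context above: insert has no branch for val == self.val, so duplicate inputs are silently dropped rather than kept, unlike a stable sort. A quick check, assuming tree_sort from that row is in scope:

print(tree_sort([3, 1, 3, 2]))  # [1, 2, 3] -- the repeated 3 disappears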
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCamelCase_ = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def lowercase__( __UpperCamelCase: Any ): """simple docstring""" config.addinivalue_line( 'markers' ,'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' ) config.addinivalue_line( 'markers' ,'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' ) config.addinivalue_line('markers' ,'is_pipeline_test: mark test to run only when pipelines are tested' ) config.addinivalue_line('markers' ,'is_staging_test: mark test to run only in the staging environment' ) config.addinivalue_line('markers' ,'accelerate_tests: mark test that require accelerate' ) config.addinivalue_line('markers' ,'tool_tests: mark the tool tests that are run on their specific schedule' ) def lowercase__( __UpperCamelCase: List[str] ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__UpperCamelCase ) def lowercase__( __UpperCamelCase: Optional[int] ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE : str = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(__UpperCamelCase ,id=__UpperCamelCase ) def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Optional[Any] ): """simple docstring""" if exitstatus == 5: SCREAMING_SNAKE_CASE : Any = 0 # Doctest custom flag to ignore output. UpperCamelCase_ = doctest.register_optionflag("IGNORE_RESULT") UpperCamelCase_ = doctest.OutputChecker class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, A, A, A ) UpperCamelCase_ = CustomOutputChecker UpperCamelCase_ = HfDoctestModule UpperCamelCase_ = HfDocTestParser
28
'''simple docstring''' import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ): """simple docstring""" from .. import __version__ SCREAMING_SNAKE_CASE : int = take_from SCREAMING_SNAKE_CASE : Optional[int] = () if not isinstance(args[0] ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : List[str] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ): raise ValueError( f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" f" version {__version__} is >= {version_name}" ) SCREAMING_SNAKE_CASE : Tuple = None if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(__UpperCamelCase ,__UpperCamelCase ): values += (getattr(__UpperCamelCase ,__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else '' warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0: SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1] SCREAMING_SNAKE_CASE : Any = call_frame.filename SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(__UpperCamelCase ) == 0: return elif len(__UpperCamelCase ) == 1: return values[0] return values
28
1
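The deprecate() helper in the context above pops soon-to-be-removed kwargs, warns with the caller's stack frame, and returns the popped values. A stripped-down sketch of that pattern (the names here are illustrative, not the diffusers API):

import warnings

def pop_deprecated(kwargs: dict, name: str, removed_in: str):
    if name in kwargs:
        warnings.warn(
            f"`{name}` is deprecated and will be removed in version {removed_in}.",
            FutureWarning,
            stacklevel=2,
        )
        return kwargs.pop(name)
    return None

def resize(image, size=None, **kwargs):
    # accept a legacy `target_size` kwarg for one more release (hypothetical)
    legacy = pop_deprecated(kwargs, "target_size", removed_in="1.0.0")
    size = size if size is not None else legacy
    return size

print(resize(None, target_size=(224, 224)))  # warns, returns (224, 224)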
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE ) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, *A, **A ): '''simple docstring''' super().__init__(*A, **A ) requires_backends(self, 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def UpperCamelCase_ ( self, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {} if top_k is not None: SCREAMING_SNAKE_CASE : Optional[Any] = top_k return {}, {}, postprocess_params def __call__( self, A, **A ): '''simple docstring''' return super().__call__(A, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = load_image(A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(images=A, return_tensors=self.framework ) return model_inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model(**A ) return model_outputs def UpperCamelCase_ ( self, A, A=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: SCREAMING_SNAKE_CASE : int = self.model.config.num_labels if self.framework == "pt": SCREAMING_SNAKE_CASE : str = model_outputs.logits.softmax(-1 )[0] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = probs.topk(A ) elif self.framework == "tf": SCREAMING_SNAKE_CASE : Optional[Any] = stable_softmax(model_outputs.logits, axis=-1 )[0] SCREAMING_SNAKE_CASE : int = tf.math.top_k(A, k=A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"Unsupported framework: {self.framework}" ) SCREAMING_SNAKE_CASE : Optional[int] = scores.tolist() SCREAMING_SNAKE_CASE : str = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A, A )]
28
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
1
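The RoFormer __init__ above defers heavy torch/tf/flax imports via transformers' _LazyModule, so `import transformers` stays cheap. A tiny stand-in for the same idea using a PEP 562 module-level __getattr__ (simplified; the real _LazyModule also handles submodules, __dir__, and module specs):

import importlib

_IMPORT_MAP = {"RoFormerConfig": ".configuration_roformer"}  # name -> submodule

def __getattr__(name):
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")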
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class _a(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
28
'''simple docstring'''


def get_highest_set_bit_position(number: int) -> int:
    # Return the 1-indexed position of the highest set bit; 0 when number == 0.
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
28
1
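For non-negative integers the shift loop in the row above computes exactly Python's built-in int.bit_length(); negative inputs would loop forever, since -1 >> 1 == -1. A quick consistency check against the function in that row:

for n in (0, 1, 5, 1024):
    assert get_highest_set_bit_position(n) == n.bit_length()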
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
28
'''simple docstring'''
from typing import Dict

from .base import GenericTensor, Pipeline


class _a(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
28
1
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
28
'''simple docstring''' from __future__ import annotations import queue class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = data SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[str] = None def lowercase__( ): """simple docstring""" print('\n********Press N to stop entering at any point of time********\n' ) SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower() SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) ) q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: " SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = left_node q.put(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: " SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = right_node q.put(__UpperCamelCase ) raise def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return print(node.data ,end=',' ) pre_order(node.left ) pre_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return in_order(node.left ) print(node.data ,end=',' ) in_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data ,end=',' ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Optional[int] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Union[str, Any] = [] while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__UpperCamelCase ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[Any] = node while n or stack: while n: # start from root node, find its left child print(n.data ,end=',' ) stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE : List[Any] = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE : 
Any = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : int = node while n or stack: while n: stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = n.left SCREAMING_SNAKE_CASE : Tuple = stack.pop() print(n.data ,end=',' ) SCREAMING_SNAKE_CASE : str = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], [] SCREAMING_SNAKE_CASE : Optional[int] = node stacka.append(__UpperCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__UpperCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data ,end=',' ) def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ): """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 ) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) UpperCamelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
28
1
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    '''simple docstring'''

    def test_input_types(self):
        '''simple docstring'''
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        '''simple docstring'''
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
28
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
28
1
'''simple docstring'''
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """simple docstring"""
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state='closed')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.'
            )


if __name__ == "__main__":
    main()
28
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    '''simple docstring'''

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        '''simple docstring'''
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        '''simple docstring'''
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
28
1
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Union[str, Any] ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' ,[False, True] ) def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Tuple ,__UpperCamelCase: Any ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tmp_path / 'cache' SCREAMING_SNAKE_CASE : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ,keep_in_memory=__UpperCamelCase ).read() _check_parquet_dataset(__UpperCamelCase ,__UpperCamelCase ) @pytest.mark.parametrize( 'features' ,[ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] ,) def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Any ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tmp_path / 'cache' SCREAMING_SNAKE_CASE : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(__UpperCamelCase ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read() _check_parquet_dataset(__UpperCamelCase ,__UpperCamelCase ) @pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] ) def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = tmp_path / 'cache' SCREAMING_SNAKE_CASE : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ,split=__UpperCamelCase ).read() _check_parquet_dataset(__UpperCamelCase ,__UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' ,[str, list] ) def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: List[str] ): """simple docstring""" if issubclass(__UpperCamelCase ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : int = parquet_path elif issubclass(__UpperCamelCase ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : Dict = [parquet_path] SCREAMING_SNAKE_CASE : Tuple = tmp_path / 'cache' SCREAMING_SNAKE_CASE : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} SCREAMING_SNAKE_CASE : List[str] = 
ParquetDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ).read() _check_parquet_dataset(__UpperCamelCase ,__UpperCamelCase ) def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: str=("train",) ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) for split in splits: SCREAMING_SNAKE_CASE : Dict = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' ,[False, True] ) def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: Dict ,__UpperCamelCase: int ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / 'cache' SCREAMING_SNAKE_CASE : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader( {'train': parquet_path} ,cache_dir=__UpperCamelCase ,keep_in_memory=__UpperCamelCase ).read() _check_parquet_datasetdict(__UpperCamelCase ,__UpperCamelCase ) @pytest.mark.parametrize( 'features' ,[ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] ,) def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: List[Any] ,__UpperCamelCase: List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE : int = tmp_path / 'cache' SCREAMING_SNAKE_CASE : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} SCREAMING_SNAKE_CASE : int = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE : Union[str, Any] = ( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader({'train': parquet_path} ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read() _check_parquet_datasetdict(__UpperCamelCase ,__UpperCamelCase ) @pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] ) def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: Union[str, Any] ): """simple docstring""" if split: SCREAMING_SNAKE_CASE : Optional[Any] = {split: parquet_path} else: SCREAMING_SNAKE_CASE : Union[str, Any] = 'train' SCREAMING_SNAKE_CASE : List[Any] = {'train': parquet_path, 'test': parquet_path} SCREAMING_SNAKE_CASE : List[Any] = tmp_path / 'cache' SCREAMING_SNAKE_CASE : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ).read() _check_parquet_datasetdict(__UpperCamelCase ,__UpperCamelCase ,splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: int ): """simple docstring""" SCREAMING_SNAKE_CASE : str = ParquetDatasetWriter(__UpperCamelCase ,tmp_path / 'foo.parquet' ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : Any = pq.ParquetFile(tmp_path / 'foo.parquet' ) SCREAMING_SNAKE_CASE : List[str] = pf.read() assert dataset.data.table == output_table def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Dict 
): """simple docstring""" SCREAMING_SNAKE_CASE : str = str(shared_datadir / 'test_image_rgb.jpg' ) SCREAMING_SNAKE_CASE : Any = {'image': [image_path]} SCREAMING_SNAKE_CASE : Union[str, Any] = Features({'image': Image()} ) SCREAMING_SNAKE_CASE : Any = Dataset.from_dict(__UpperCamelCase ,features=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetWriter(__UpperCamelCase ,tmp_path / 'foo.parquet' ) assert writer.write() > 0 SCREAMING_SNAKE_CASE : int = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) ,streaming=__UpperCamelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( 'feature, expected' ,[ (Features({'foo': Value('int32' )} ), None), (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] ,) def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Dict ): """simple docstring""" assert get_writer_batch_size(__UpperCamelCase ) == expected
28
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
28
1
'''simple docstring'''
class Node:
    '''simple docstring'''

    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    # Recursive in-order traversal that collects values into res
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    # Build a BST from the input values
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
28
'''simple docstring'''
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
28
1
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "Salesforce/codegen-350M-mono": 2_0_4_8, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : List[str] = ['''input_ids''', '''attention_mask'''] A : Optional[int] = CodeGenTokenizer def __init__( self, A=None, A=None, A=None, A="<|endoftext|>", A="<|endoftext|>", A="<|endoftext|>", A=False, **A, ): '''simple docstring''' super().__init__( A, A, tokenizer_file=A, unk_token=A, bos_token=A, eos_token=A, add_prefix_space=A, **A, ) if kwargs.pop('add_bos_token', A ): SCREAMING_SNAKE_CASE : str = kwargs.pop('name_or_path', '' ) raise ValueError( 'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.' ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', A ) != add_prefix_space: SCREAMING_SNAKE_CASE : Optional[int] = getattr(A, pre_tok_state.pop('type' ) ) SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space SCREAMING_SNAKE_CASE : Optional[Any] = pre_tok_class(**A ) SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space def UpperCamelCase_ ( self, *A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('is_split_into_words', A ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A, **A ) def UpperCamelCase_ ( self, *A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = kwargs.get('is_split_into_words', A ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*A, **A ) def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._tokenizer.model.save(A, name=A ) return tuple(A ) def UpperCamelCase_ ( self, A, A = False, A = None, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = super().decode( token_ids=A, skip_special_tokens=A, clean_up_tokenization_spaces=A, **A, ) if truncate_before_pattern is not None and len(A ) > 0: SCREAMING_SNAKE_CASE : List[str] = self.truncate(A, A ) return decoded_text def UpperCamelCase_ ( self, A, A ): '''simple docstring''' def find_re(A, A, A ): SCREAMING_SNAKE_CASE : Tuple = pattern.search(A, A ) return m.start() if m else -1 SCREAMING_SNAKE_CASE : List[Any] = [re.compile(A, re.MULTILINE ) for pattern in truncate_before_pattern] SCREAMING_SNAKE_CASE : str = list(re.finditer('^print', A, re.MULTILINE ) ) if len(A ) > 1: SCREAMING_SNAKE_CASE : Optional[Any] = completion[: prints[1].start()] SCREAMING_SNAKE_CASE : str = list(re.finditer('^def', A, re.MULTILINE ) ) if len(A ) > 1: SCREAMING_SNAKE_CASE : str = completion[: defs[1].start()] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Dict = [ pos for pos in [find_re(A, A, A ) for terminal in terminals] if pos != -1 ] if len(A ) > 0: return completion[: min(A )] else: return completion
28
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
28
1
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """simple docstring"""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
28
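A worked trace (illustrative) of the iterative search above on the module's graph: because neighbours are pushed with reversed(), the left-most neighbour is popped first, and first visits land in the order A, B, D, E, F, C, G.

graph = {"A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"],
         "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"], "G": ["F"]}
explored, stack, first_visit = set("A"), ["A"], []
while stack:
    v = stack.pop()
    if v not in first_visit:
        first_visit.append(v)
    explored.add(v)
    for adj in reversed(graph[v]):
        if adj not in explored:
            stack.append(adj)
assert first_visit == ["A", "B", "D", "E", "F", "C", "G"]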
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: no smaller prime has touched it
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
28
1
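The sieve above detects primes on the fly: phi[i] can still equal i - 1 only if no smaller prime divided i, and each prime then rescales phi of its multiples by (1 - 1/i) using exact integer arithmetic. A brute-force cross-check of the result for limit = 10 (phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4, summing to 31):

from math import gcd

limit = 10
phi = [sum(1 for k in range(1, d) if gcd(k, d) == 1) for d in range(limit + 1)]
assert sum(phi[2:]) == 31  # same value solution(10) returns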
'''simple docstring''' import argparse from collections import defaultdict import yaml UpperCamelCase_ = "docs/source/en/_toctree.yml" def lowercase__( __UpperCamelCase: List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE : int = defaultdict(__UpperCamelCase ) for doc in model_doc: counts[doc["local"]] += 1 SCREAMING_SNAKE_CASE : int = [key for key, value in counts.items() if value > 1] SCREAMING_SNAKE_CASE : Any = [] for duplicate_key in duplicates: SCREAMING_SNAKE_CASE : str = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} ) if len(__UpperCamelCase ) > 1: raise ValueError( f"{duplicate_key} is present several times in the documentation table of content at " '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] ) # Sort return sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : s["title"].lower() ) def lowercase__( __UpperCamelCase: Union[str, Any]=False ): """simple docstring""" with open(__UpperCamelCase ,encoding='utf-8' ) as f: SCREAMING_SNAKE_CASE : Optional[int] = yaml.safe_load(f.read() ) # Get to the API doc SCREAMING_SNAKE_CASE : Dict = 0 while content[api_idx]["title"] != "API": api_idx += 1 SCREAMING_SNAKE_CASE : Dict = content[api_idx]['sections'] # Then to the model doc SCREAMING_SNAKE_CASE : Optional[Any] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 SCREAMING_SNAKE_CASE : Optional[int] = api_doc[model_idx]['sections'] SCREAMING_SNAKE_CASE : List[Any] = [(idx, section) for idx, section in enumerate(__UpperCamelCase ) if 'sections' in section] SCREAMING_SNAKE_CASE : Tuple = False for idx, modality_doc in modalities_docs: SCREAMING_SNAKE_CASE : Optional[int] = modality_doc['sections'] SCREAMING_SNAKE_CASE : Optional[int] = clean_model_doc_toc(__UpperCamelCase ) if old_modality_doc != new_modality_doc: SCREAMING_SNAKE_CASE : Optional[Any] = True if overwrite: SCREAMING_SNAKE_CASE : Optional[int] = new_modality_doc if diff: if overwrite: SCREAMING_SNAKE_CASE : Optional[int] = model_doc SCREAMING_SNAKE_CASE : List[Any] = api_doc with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as f: f.write(yaml.dump(__UpperCamelCase ,allow_unicode=__UpperCamelCase ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") UpperCamelCase_ = parser.parse_args() check_model_doc(args.fix_and_overwrite)
28
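The dedupe logic above boils down to: count how often each 'local' key occurs, keep the first entry for any duplicated key plus every unique entry, then sort by lower-cased title. A condensed sketch of that pattern (illustrative names, and omitting the script's conflicting-title error):

from collections import defaultdict

def dedupe_toc(entries):
    counts = defaultdict(int)
    for entry in entries:
        counts[entry["local"]] += 1
    seen, result = set(), []
    for entry in entries:
        if counts[entry["local"]] == 1 or entry["local"] not in seen:
            seen.add(entry["local"])
            result.append(entry)
    return sorted(result, key=lambda e: e["title"].lower())

toc = [{"local": "bert", "title": "BERT"},
       {"local": "bert", "title": "BERT"},
       {"local": "albert", "title": "ALBERT"}]
assert dedupe_toc(toc) == [{"local": "albert", "title": "ALBERT"},
                           {"local": "bert", "title": "BERT"}]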
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
28
1
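The trim_offsets assertions above encode a compact invariant for a single-token word repeated as "w w": the second token's character span either absorbs the separating space (trim_offsets=False) or starts one position past it (trim_offsets=True). The expected spans in isolation (a sketch, not the tokenizer API):

def expected_spans(word, trim_offsets):
    first = (0, len(word))
    start = len(word) + 1 if trim_offsets else len(word)
    return [first, (start, 2 * len(word) + 1)]

assert expected_spans("hello", trim_offsets=True) == [(0, 5), (6, 11)]
assert expected_spans("hello", trim_offsets=False) == [(0, 5), (5, 11)]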
'''simple docstring'''
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
28
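One property of swap_nodes worth making explicit: it exchanges payloads rather than relinking nodes, so node identity, and any external reference to a node, is preserved. A self-contained check of that data-swap idiom (illustrative):

class _N:
    def __init__(self, data):
        self.data, self.next = data, None

a, b = _N(1), _N(2)
a.next = b                           # tiny two-node list
first = a                            # external reference to the first node
a.data, b.data = b.data, a.data      # the same swap swap_nodes performs
assert first is a and first.data == 2    # same object, swapped payload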
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = StableDiffusionXLImgaImgPipeline A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} A : str = PipelineTesterMixin.required_optional_params - {'''latents'''} A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, ) SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler( beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, ) SCREAMING_SNAKE_CASE : int = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A ) SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 
'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) # forward without prompt embeds SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']] SCREAMING_SNAKE_CASE : int = sd_pipe(**A ) SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )] ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe( **A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, ) SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A ) SCREAMING_SNAKE_CASE : str = pipe(**A ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
28
1
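get_dummy_inputs above relies on a device-aware seeding idiom: MPS devices only take the global CPU generator, while every other device accepts a device-bound torch.Generator. As a standalone helper (a sketch, assuming torch is available):

import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)   # global CPU generator for MPS
    return torch.Generator(device=device).manual_seed(seed)

generator = make_generator("cpu", seed=0)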
'''simple docstring''' from jiwer import compute_measures import datasets UpperCamelCase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" UpperCamelCase_ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" UpperCamelCase_ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Value('string', id='sequence' ), 'references': datasets.Value('string', id='sequence' ), } ), codebase_urls=['https://github.com/jitsi/jiwer/'], reference_urls=[ 'https://en.wikipedia.org/wiki/Word_error_rate', ], ) def UpperCamelCase_ ( self, A=None, A=None, A=False ): '''simple docstring''' if concatenate_texts: return compute_measures(A, A )["wer"] else: SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = 0 for prediction, reference in zip(A, A ): SCREAMING_SNAKE_CASE : Any = compute_measures(A, A ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + 
measures["deletions"] + measures["hits"] return incorrect / total
28
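The iterative branch above accumulates jiwer's edit counts across all pairs before dividing, which weights every reference word equally instead of averaging per-sentence rates. The same computation as a standalone function, assuming jiwer's compute_measures signature (truth, hypothesis):

from jiwer import compute_measures

def iterative_wer(predictions, references):
    incorrect = total = 0
    for prediction, reference in zip(predictions, references):
        measures = compute_measures(reference, prediction)
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    return incorrect / total

# The docstring example: 4 errors over 8 reference words -> 0.5
predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
assert iterative_wer(predictions, references) == 0.5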
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
28
1
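batch_decode above ends with a per-sample argmax over the three decoding heads (character, BPE, wordpiece): whichever head reports the highest confidence supplies the final string. The selection step in isolation (illustrative, plain tuples):

def pick_best(char, bpe, wp):
    # Each argument is a (decoded_string, confidence) pair from one head.
    return max([char, bpe, wp], key=lambda pair: pair[1])

assert pick_best(("cat", 0.91), ("ca#", 0.40), ("cat", 0.88)) == ("cat", 0.91)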
'''simple docstring'''
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
28
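Worked example for the scoring rule above: "SKY" is worth 19 + 11 + 25 = 55, and 55 is the 10th triangular number (10 * 11 / 2), so it counts as a triangle word.

assert sum(ord(c) - 64 for c in "SKY") == 55   # S=19, K=11, Y=25
assert 55 == 10 * 11 // 2                      # t_10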
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("transformers.models.speecht5") def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ): """simple docstring""" hf_model.apply_weight_norm() SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v'] SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"] SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v'] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig() SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase ) load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float() SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float() model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) UpperCamelCase_ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
28
1
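After the generator weights are copied, the script above reads a two-row stats.npy holding the mel-spectrogram mean and scale and attaches them to the model as float tensors. That step in isolation (a sketch; the [mean, scale] row layout is taken from the code above):

import numpy as np
import torch

def load_mel_stats(stats_path):
    stats = np.load(stats_path)
    mean = torch.from_numpy(stats[0].reshape(-1)).float()
    scale = torch.from_numpy(stats[1].reshape(-1)).float()
    return mean, scale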
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowercase__( __UpperCamelCase: Namespace ): """simple docstring""" return ConvertCommand( args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name ) UpperCamelCase_ = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @staticmethod def UpperCamelCase_ ( A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parser.add_parser( 'convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.', ) train_parser.add_argument('--model_type', type=A, required=A, help='Model\'s type.' ) train_parser.add_argument( '--tf_checkpoint', type=A, required=A, help='TensorFlow checkpoint path or folder.' ) train_parser.add_argument( '--pytorch_dump_output', type=A, required=A, help='Path to the PyTorch saved model output.' ) train_parser.add_argument('--config', type=A, default='', help='Configuration file path or folder.' ) train_parser.add_argument( '--finetuning_task_name', type=A, default=A, help='Optional fine-tuning task name if the TF model was a finetuned model.', ) train_parser.set_defaults(func=A ) def __init__( self, A, A, A, A, A, *A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger('transformers-cli/converting' ) self._logger.info(F"Loading model {model_type}" ) SCREAMING_SNAKE_CASE : Any = model_type SCREAMING_SNAKE_CASE : Tuple = tf_checkpoint SCREAMING_SNAKE_CASE : Optional[Any] = pytorch_dump_output SCREAMING_SNAKE_CASE : Optional[int] = config SCREAMING_SNAKE_CASE : Optional[Any] = finetuning_task_name def UpperCamelCase_ ( self ): '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( 
convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) if "ckpt" in self._tf_checkpoint.lower(): SCREAMING_SNAKE_CASE : Dict = self._tf_checkpoint SCREAMING_SNAKE_CASE : List[str] = '' else: SCREAMING_SNAKE_CASE : List[Any] = self._tf_checkpoint SCREAMING_SNAKE_CASE : int = '' convert_transfo_xl_checkpoint_to_pytorch( A, self._config, self._pytorch_dump_output, A ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
28
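Every branch of the dispatch above follows the same shape: import the converter lazily, so TensorFlow is only required when a conversion actually runs, and turn a failed import into the helpful message defined at module top. The pattern distilled (illustrative, not the CLI's actual helper):

import importlib

def load_converter(module_path, attr):
    try:
        module = importlib.import_module(module_path)
    except ImportError as err:
        raise ImportError("Converting TensorFlow checkpoints requires TensorFlow.") from err
    return getattr(module, attr)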
'''simple docstring''' from typing import Any class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = data SCREAMING_SNAKE_CASE : Any = None def __repr__( self ): '''simple docstring''' return F"Node({self.data})" class _a : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None def __iter__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.head while node: yield node.data SCREAMING_SNAKE_CASE : List[str] = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(A ) for item in self] ) def __getitem__( self, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, A, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(A ): SCREAMING_SNAKE_CASE : Union[str, Any] = current.next SCREAMING_SNAKE_CASE : Any = data def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(len(self ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(0, A ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A ) if self.head is None: SCREAMING_SNAKE_CASE : Optional[int] = new_node elif index == 0: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head SCREAMING_SNAKE_CASE : Tuple = new_node else: SCREAMING_SNAKE_CASE : Optional[int] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : str = temp.next SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next SCREAMING_SNAKE_CASE : List[str] = new_node def UpperCamelCase_ ( self ): # print every node data '''simple docstring''' print(self ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCamelCase_ ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCamelCase_ ( self, A = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node if index == 0: SCREAMING_SNAKE_CASE : List[str] = self.head.next else: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Any = temp.next SCREAMING_SNAKE_CASE : List[str] = temp.next SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next return delete_node.data def UpperCamelCase_ ( self ): '''simple docstring''' return self.head is None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Any = self.head while current: # Store the current node's next node. 
SCREAMING_SNAKE_CASE : Optional[int] = current.next # Make the current node's next point backwards SCREAMING_SNAKE_CASE : int = prev # Make the previous node be the current node SCREAMING_SNAKE_CASE : int = current # Make the current node the next node (to progress iteration) SCREAMING_SNAKE_CASE : List[Any] = next_node # Return prev in order to put the head at the end SCREAMING_SNAKE_CASE : List[Any] = prev def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = LinkedList() assert linked_list.is_empty() is True assert str(__UpperCamelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(__UpperCamelCase ) == i linked_list.insert_nth(__UpperCamelCase ,i + 1 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__UpperCamelCase ) == 9 assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True for i in range(0 ,9 ): SCREAMING_SNAKE_CASE : Any = -i assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True linked_list.reverse() assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_9_2.5_5_5_5_5, 'Hello, world!', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] SCREAMING_SNAKE_CASE : Optional[int] = LinkedList() for i in test_input: linked_list.insert_tail(__UpperCamelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head SCREAMING_SNAKE_CASE : str = linked_list.delete_head() assert result == -9 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 ) assert result is None assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__UpperCamelCase ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__UpperCamelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__( ): """simple docstring""" from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Dict = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(__UpperCamelCase ) print('\nReading/changing Node data using indexing:' ) print(f"Element at Position 1: {linked_list[1]}" ) SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip() print('New list:' ) print(__UpperCamelCase ) print(f"length of linked_list is : {len(__UpperCamelCase )}" ) if __name__ == "__main__": main()
28
1
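The reverse method above is the classic three-pointer, in-place reversal: remember the successor, point the current node backwards, then advance. A compact standalone version with its own minimal Node (a sketch):

class Node:
    def __init__(self, data, next=None):
        self.data, self.next = data, next

def reverse(head):
    prev, current = None, head
    while current:
        next_node = current.next   # store the successor
        current.next = prev        # point the current node backwards
        prev, current = current, next_node
    return prev                    # old tail is the new head

head = reverse(Node(1, Node(2, Node(3))))
assert [head.data, head.next.data, head.next.next.data] == [3, 2, 1]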
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
28
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
28
1
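The YOLOS image-processor tests above repeatedly derive expected output sizes from a shortest-edge resize rule. A minimal standalone sketch of that rule (function and parameter names are illustrative, and the `longest_edge` cap applied by the real processor is omitted):

def expected_resize(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    # Scale the shorter side to `shortest_edge`; the other side keeps the aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(expected_resize(400, 640, 18))  # (18, 28), matching the tester's logic above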
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    """simple docstring"""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
28
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset) def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,): """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(__UpperCamelCase ): if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ): if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " 'is an empty dataset dictionary.' ) raise ValueError( f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." ) if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = ( (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase ) else: return _interleave_iterable_datasets( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase ) def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,): """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(__UpperCamelCase ): if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ): if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " 'is an empty dataset dictionary.' 
) raise ValueError( f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." ) if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = ( (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase ) else: return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
28
1
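The launcher above relies on a generic "import a script as a module, then hand its worker function to a spawner" pattern. A minimal sketch of just that step, with no TPU dependency (the `_mp_fn` attribute name follows the launcher's convention; everything else is illustrative):

import importlib
import sys
from pathlib import Path

def load_entry_point(script_path: str, attr: str = "_mp_fn"):
    fpath = Path(script_path)
    sys.path.append(str(fpath.parent.resolve()))  # make the script importable by name
    mod = importlib.import_module(fpath.stem)     # import it as a module
    return getattr(mod, attr)                     # fetch the per-process worker function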
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number)).strip('-')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])
        # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
28
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
28
1
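A worked trace of the two's-complement construction in `arithmetic_right_shift` (as named in the cleaned-up listing above), assuming that listing is in scope, for `arithmetic_right_shift(-8, 1)`:

# bin(-8) == '-0b1000'          -> magnitude bits '1000', length 4
# bin(abs(-8) - (1 << 4))[3:]   -> bin(-8)[3:] == '1000' (two's-complement bits)
# prepend the sign bit: '11000' (5-bit code for -8)
# shift by 1: replicate the sign bit once, drop the last bit -> '0b11100' (-4)
print(arithmetic_right_shift(-8, 1))  # '0b11100'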
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' self.assertEqual(len(A ), len(A ) ) for a, b in zip(A, A ): self.assertAlmostEqual(A, A, delta=A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(A ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step, 3 ) self.assertEqual(len(accumulator.gradients ), 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1E-2 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None ops.enable_eager_execution_internal() SCREAMING_SNAKE_CASE : str = tf.config.list_physical_devices('CPU' ) if len(A ) == 1: tf.config.set_logical_device_configuration( physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.config.list_logical_devices(device_type='CPU' ) SCREAMING_SNAKE_CASE : Optional[int] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): SCREAMING_SNAKE_CASE : Optional[int] = GradientAccumulator() SCREAMING_SNAKE_CASE : Optional[int] = tf.Variable([4.0, 3.0] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = create_optimizer(5E-5, 10, 5 ) SCREAMING_SNAKE_CASE : List[Any] = tf.Variable([0.0, 0.0], trainable=A ) def accumulate_on_replica(A ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) ) @tf.function def accumulate(A, A ): with strategy.scope(): SCREAMING_SNAKE_CASE : Any = strategy.experimental_local_results(A ) local_variables[0].assign(A ) local_variables[1].assign(A ) strategy.run(A, args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(A ) def _check_local_values(A, A ): SCREAMING_SNAKE_CASE : str = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value(), A, tol=1E-2 ) self.assertListAlmostEqual(values[1].value(), A, tol=1E-2 ) accumulate([1.0, 2.0], [-1.0, 1.0] ) accumulate([3.0, -1.0], [-1.0, -1.0] ) accumulate([-2.0, 2.0], [3.0, -2.0] ) self.assertEqual(accumulator.step, 3 ) _check_local_values([2.0, 3.0], [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) _check_local_values([0.0, 0.0], [0.0, 0.0] )
28
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
28
1
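The test above checks that a gradient accumulator sums per-step gradients and can be reset. A minimal NumPy sketch of that contract, illustrative only and not the transformers implementation:

import numpy as np

class TinyAccumulator:
    def __init__(self):
        self.step = 0
        self._total = None

    def __call__(self, grad: np.ndarray) -> None:
        # Accumulate the running sum of per-step gradients.
        self._total = grad.copy() if self._total is None else self._total + grad
        self.step += 1

    @property
    def gradients(self):
        return self._total

    def reset(self) -> None:
        self.step, self._total = 0, None

acc = TinyAccumulator()
for g in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    acc(np.array(g))
print(acc.step, acc.gradients)  # 3 [-2.  5.], matching the expected values in the test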
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
28
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
28
1
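The VideoMAE file above is a thin deprecation alias: warn in `__init__`, then defer everything to the replacement class. The same pattern in isolation (class names here are illustrative):

import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0):
        self.scale = scale

class OldExtractor(NewProcessor):
    """Deprecated alias kept only for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)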
'''simple docstring'''
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
28
'''simple docstring'''
class Node:
    '''simple docstring'''

    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
28
1
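Both listings above are classic exercises: the permutation generator is a backtracking search over an `index_used` mask (it prints all n! orderings), and tree sort inserts into a binary search tree and reads it back in order (O(n log n) on average, O(n^2) on already-sorted input). A quick check against the cleaned-up `tree_sort` above:

assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]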
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = ['''pixel_values'''] def __init__( self, A = True, A = None, A = PILImageResampling.BICUBIC, A = True, A = None, A = True, A = 1 / 255, A = True, A = IMAGENET_DEFAULT_MEAN, A = IMAGENET_DEFAULT_STD, **A, ): '''simple docstring''' super().__init__(**A ) SCREAMING_SNAKE_CASE : Any = size if size is not None else {'shortest_edge': 224} SCREAMING_SNAKE_CASE : List[str] = get_size_dict(A, default_to_square=A ) SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else {'height': 224, 'width': 224} SCREAMING_SNAKE_CASE : List[str] = get_size_dict(A, param_name='crop_size' ) SCREAMING_SNAKE_CASE : Any = do_resize SCREAMING_SNAKE_CASE : List[str] = size SCREAMING_SNAKE_CASE : Any = resample SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop SCREAMING_SNAKE_CASE : Any = crop_size SCREAMING_SNAKE_CASE : Tuple = do_rescale SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor SCREAMING_SNAKE_CASE : List[Any] = do_normalize SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN SCREAMING_SNAKE_CASE : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self, A, A, A = PILImageResampling.BICUBIC, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = get_size_dict(A, default_to_square=A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: SCREAMING_SNAKE_CASE : str = int((256 / 224) * size['shortest_edge'] ) SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(A, size=A, default_to_square=A ) SCREAMING_SNAKE_CASE : Dict = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" ) return resize( A, size=(size_dict['height'], size_dict['width']), resample=A, data_format=A, **A ) def UpperCamelCase_ ( self, A, A, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(F"Size dict must have keys 'height' and 'width'. 
Got {size.keys()}" ) return center_crop(A, size=(size['height'], size['width']), data_format=A, **A ) def UpperCamelCase_ ( self, A, A, A = None, **A, ): '''simple docstring''' return rescale(A, scale=A, data_format=A, **A ) def UpperCamelCase_ ( self, A, A, A, A = None, **A, ): '''simple docstring''' return normalize(A, mean=A, std=A, data_format=A, **A ) def UpperCamelCase_ ( self, A, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = ChannelDimension.FIRST, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE : Optional[Any] = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE : Dict = size if size is not None else self.size SCREAMING_SNAKE_CASE : List[str] = get_size_dict(A, default_to_square=A ) SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(A, param_name='crop_size' ) SCREAMING_SNAKE_CASE : int = make_list_of_images(A ) if not valid_images(A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(A ) for image in images] if do_resize: SCREAMING_SNAKE_CASE : List[str] = [self.resize(A, A, A ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE : int = [self.center_crop(A, A ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE : int = [self.rescale(A, A ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE : List[str] = [self.normalize(A, A, A ) for image in images] SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(A, A ) for image in images] SCREAMING_SNAKE_CASE : Tuple = {'pixel_values': images} return BatchFeature(data=A, tensor_type=A )
28
'''simple docstring''' import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ): """simple docstring""" from .. import __version__ SCREAMING_SNAKE_CASE : int = take_from SCREAMING_SNAKE_CASE : Optional[int] = () if not isinstance(args[0] ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : List[str] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ): raise ValueError( f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" f" version {__version__} is >= {version_name}" ) SCREAMING_SNAKE_CASE : Tuple = None if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(__UpperCamelCase ,__UpperCamelCase ): values += (getattr(__UpperCamelCase ,__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else '' warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0: SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1] SCREAMING_SNAKE_CASE : Any = call_frame.filename SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(__UpperCamelCase ) == 0: return elif len(__UpperCamelCase ) == 1: return values[0] return values
28
1
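The preprocessing row above resizes by a (256/224)-scaled shortest edge, center-crops, rescales to [0, 1] and normalizes. A rough standalone sketch of that pipeline, assuming an RGB PIL image and ImageNet statistics (the helper name and defaults here are assumptions, not the row's API):

import numpy as np
from PIL import Image

def preprocess(image, shortest_edge=224, crop=224,
               mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    # resize so the shorter side becomes int((256 / 224) * shortest_edge)
    w, h = image.size
    target = int((256 / 224) * shortest_edge)
    scale = target / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    # center crop to (crop, crop)
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    image = image.crop((left, top, left + crop, top + crop))
    # rescale to [0, 1], normalize per channel, and move channels first
    arr = np.asarray(image).astype(np.float32) / 255.0
    arr = (arr - np.array(mean)) / np.array(std)
    return arr.transpose(2, 0, 1)  # HWC -> CHW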
'''simple docstring''' import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase__( __UpperCamelCase: Any ): """simple docstring""" return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE : List[Any] = key.replace('heads.cmd.mim_head.cls.predictions' ,'mmm_image_head' ) SCREAMING_SNAKE_CASE : int = key.replace('heads.cmd.mlm_head.cls.predictions' ,'mmm_text_head' ) SCREAMING_SNAKE_CASE : str = key.replace('heads.cmd.itm_head.cls' ,'itm_head' ) SCREAMING_SNAKE_CASE : Dict = key.replace('heads.cmd.itm_head.pooler' ,'itm_head.pooler' ) SCREAMING_SNAKE_CASE : Dict = key.replace('heads.cmd.clip_head.logit_scale' ,'flava.logit_scale' ) SCREAMING_SNAKE_CASE : List[Any] = key.replace('heads.fairseq_mlm.cls.predictions' ,'mlm_head' ) SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('heads.imagenet.mim_head.cls.predictions' ,'mim_head' ) SCREAMING_SNAKE_CASE : List[Any] = key.replace('mm_text_projection' ,'flava.text_to_mm_projection' ) SCREAMING_SNAKE_CASE : List[str] = key.replace('mm_image_projection' ,'flava.image_to_mm_projection' ) SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('image_encoder.module' ,'flava.image_model' ) SCREAMING_SNAKE_CASE : Optional[int] = key.replace('text_encoder.module' ,'flava.text_model' ) SCREAMING_SNAKE_CASE : Dict = key.replace('mm_encoder.module.encoder.cls_token' ,'flava.multimodal_model.cls_token' ) SCREAMING_SNAKE_CASE : Optional[int] = key.replace('mm_encoder.module' ,'flava.multimodal_model' ) SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('text_projection' ,'flava.text_projection' ) SCREAMING_SNAKE_CASE : Optional[int] = key.replace('image_projection' ,'flava.image_projection' ) SCREAMING_SNAKE_CASE : Dict = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE : int = value return upgrade @torch.no_grad() def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: str ,__UpperCamelCase: Any ,__UpperCamelCase: Optional[int]=None ): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : int = FlavaConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = FlavaConfig() SCREAMING_SNAKE_CASE : str = FlavaForPreTraining(__UpperCamelCase ).eval() SCREAMING_SNAKE_CASE : int = convert_dalle_checkpoint(__UpperCamelCase ,__UpperCamelCase ,save_checkpoint=__UpperCamelCase ) if os.path.exists(__UpperCamelCase ): SCREAMING_SNAKE_CASE : int = torch.load(__UpperCamelCase ,map_location='cpu' ) else: SCREAMING_SNAKE_CASE : Optional[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase ,map_location='cpu' ) SCREAMING_SNAKE_CASE : int = upgrade_state_dict(__UpperCamelCase ,__UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = hf_model.state_dict() SCREAMING_SNAKE_CASE : Optional[int] = count_parameters(__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": 
UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCamelCase_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
28
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
1
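The import row above relies on a lazy-module pattern: heavy submodules are only imported when one of their attributes is first accessed. A simplified sketch of that idea (this is not the actual `_LazyModule` implementation, just an illustration of the mechanism):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    # defer importing submodules until one of their attributes is accessed
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(
            "." + self._attr_to_submodule[attr], self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

# hypothetical usage at the bottom of a package __init__.py:
# import sys
# sys.modules[__name__] = LazyModule(__name__, {"tokenization_roformer": ["RoFormerTokenizer"]})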
'''simple docstring''' def lowercase__( __UpperCamelCase: int = 1_00_00_00 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = set(range(3 ,__UpperCamelCase ,2 ) ) primes.add(2 ) for p in range(3 ,__UpperCamelCase ,2 ): if p not in primes: continue primes.difference_update(set(range(p * p ,__UpperCamelCase ,p ) ) ) SCREAMING_SNAKE_CASE : Any = [float(n ) for n in range(__UpperCamelCase + 1 )] for p in primes: for n in range(p ,__UpperCamelCase + 1 ,p ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F"""{solution() = }""")
28
'''simple docstring''' def lowercase__( __UpperCamelCase: int ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise TypeError('Input value must be an \'int\' type' ) SCREAMING_SNAKE_CASE : int = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
28
1
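A compact, runnable restatement of the totient-sum logic from the row above, with the masked names replaced by hypothetical ones; it sieves the primes below the limit, then applies phi(n) = n * prod(1 - 1/p) over the primes p dividing n:

def totient_sum(limit=1_000_000):
    # sieve of Eratosthenes over the odd numbers, plus 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p in primes:
            primes.difference_update(range(p * p, limit, p))
    # phi[n] starts at n and is scaled by (1 - 1/p) for each prime factor p
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))

print(totient_sum(10))  # 31 = phi(2) + phi(3) + ... + phi(10)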
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = 0 if start < end: SCREAMING_SNAKE_CASE : Dict = randint(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = a[end] SCREAMING_SNAKE_CASE : Dict = a[pivot] SCREAMING_SNAKE_CASE : str = temp SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = _in_place_partition(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) count += _in_place_quick_sort(__UpperCamelCase ,__UpperCamelCase ,p - 1 ) count += _in_place_quick_sort(__UpperCamelCase ,p + 1 ,__UpperCamelCase ) return count def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: Tuple ,__UpperCamelCase: Dict ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = randint(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : str = a[end] SCREAMING_SNAKE_CASE : int = a[pivot] SCREAMING_SNAKE_CASE : int = temp SCREAMING_SNAKE_CASE : str = start - 1 for index in range(__UpperCamelCase ,__UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value SCREAMING_SNAKE_CASE : Any = new_pivot_index + 1 SCREAMING_SNAKE_CASE : Tuple = a[new_pivot_index] SCREAMING_SNAKE_CASE : Tuple = a[index] SCREAMING_SNAKE_CASE : str = temp SCREAMING_SNAKE_CASE : List[Any] = a[new_pivot_index + 1] SCREAMING_SNAKE_CASE : Tuple = a[end] SCREAMING_SNAKE_CASE : Optional[int] = temp return new_pivot_index + 1, count UpperCamelCase_ = TemporaryFile() UpperCamelCase_ = 1_0_0 # 100 elements are to be sorted UpperCamelCase_ , UpperCamelCase_ = 0, 1 # mean and standard deviation UpperCamelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array UpperCamelCase_ = np.load(outfile) UpperCamelCase_ = len(M) - 1 UpperCamelCase_ = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution " "is :" ) print(z)
28
'''simple docstring''' from typing import Dict from .base import GenericTensor, Pipeline class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ): '''simple docstring''' if tokenize_kwargs is None: SCREAMING_SNAKE_CASE : Optional[int] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) SCREAMING_SNAKE_CASE : Tuple = truncation SCREAMING_SNAKE_CASE : int = tokenize_kwargs SCREAMING_SNAKE_CASE : Optional[Any] = {} if return_tensors is not None: SCREAMING_SNAKE_CASE : Optional[int] = return_tensors return preprocess_params, {}, postprocess_params def UpperCamelCase_ ( self, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.framework SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A ) return model_inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model(**A ) return model_outputs def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self, *A, **A ): '''simple docstring''' return super().__call__(*A, **A )
28
1
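An un-obfuscated sketch of the comparison-counting randomized quicksort in the row above (Lomuto partition with a random pivot swapped to the end; all names here are hypothetical):

from random import randint

def partition(a, start, end):
    # move a random pivot to the end, then Lomuto-partition,
    # counting every element comparison along the way
    count = 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]
    store = start - 1
    for i in range(start, end):
        count += 1
        if a[i] < a[end]:
            store += 1
            a[store], a[i] = a[i], a[store]
    a[store + 1], a[end] = a[end], a[store + 1]
    return store + 1, count

def quick_sort(a, start, end):
    count = 0
    if start < end:
        p, count = partition(a, start, end)
        count += quick_sort(a, start, p - 1)
        count += quick_sort(a, p + 1, end)
    return count

data = [5, 3, 8, 1, 9, 2]
comparisons = quick_sort(data, 0, len(data) - 1)
print(data, comparisons)  # the sorted list and the number of comparisons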
'''simple docstring''' import numpy as np def lowercase__( __UpperCamelCase: np.array ): """simple docstring""" return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
28
'''simple docstring''' from __future__ import annotations import queue class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = data SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[str] = None def lowercase__( ): """simple docstring""" print('\n********Press N to stop entering at any point of time********\n' ) SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower() SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) ) q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: " SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = left_node q.put(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: " SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = right_node q.put(__UpperCamelCase ) raise def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return print(node.data ,end=',' ) pre_order(node.left ) pre_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return in_order(node.left ) print(node.data ,end=',' ) in_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data ,end=',' ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Optional[int] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Union[str, Any] = [] while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__UpperCamelCase ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[Any] = node while n or stack: while n: # start from root node, find its left child print(n.data ,end=',' ) stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE : List[Any] = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE : 
Any = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : int = node while n or stack: while n: stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = n.left SCREAMING_SNAKE_CASE : Tuple = stack.pop() print(n.data ,end=',' ) SCREAMING_SNAKE_CASE : str = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], [] SCREAMING_SNAKE_CASE : Optional[int] = node stacka.append(__UpperCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__UpperCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data ,end=',' ) def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ): """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 ) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) UpperCamelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
28
1
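The activation row in the block above implements tanh via the algebraic identity tanh(x) = 2/(1 + e^(-2x)) - 1, i.e. 2*sigmoid(2x) - 1. A tiny check of that identity against numpy's built-in:

import numpy as np

def tanh_activation(vector):
    # tanh(x) rewritten as 2 / (1 + e^(-2x)) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1

x = np.array([-1.0, 0.0, 1.0])
print(tanh_activation(x))  # matches np.tanh(x)
print(np.tanh(x))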
'''simple docstring''' from timeit import timeit def lowercase__( __UpperCamelCase: int ): """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) SCREAMING_SNAKE_CASE : Optional[Any] = 0 while number: number &= number - 1 result += 1 return result def lowercase__( __UpperCamelCase: int ): """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def lowercase__( ): """simple docstring""" def do_benchmark(__UpperCamelCase: int ) -> None: SCREAMING_SNAKE_CASE : Any = 'import __main__ as z' print(f"Benchmark when {number = }:" ) print(f"{get_set_bits_count_using_modulo_operator(__UpperCamelCase ) = }" ) SCREAMING_SNAKE_CASE : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' ,setup=__UpperCamelCase ) print(f"timeit() runs in {timing} seconds" ) print(f"{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCamelCase ) = }" ) SCREAMING_SNAKE_CASE : str = timeit( 'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' ,setup=__UpperCamelCase ,) print(f"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(__UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
28
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
28
1
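The bit-count row in the block above benchmarks Brian Kernighan's trick: `n & (n - 1)` clears the lowest set bit, so the loop iterates once per set bit rather than once per bit position. A minimal sketch:

def popcount_kernighan(n):
    # each iteration removes exactly one set bit
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count

assert popcount_kernighan(0b101101) == 4
print(popcount_kernighan(25))  # 25 = 0b11001 -> 3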
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase_ = { "configuration_owlvit": [ "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OwlViTConfig", "OwlViTOnnxConfig", "OwlViTTextConfig", "OwlViTVisionConfig", ], "processing_owlvit": ["OwlViTProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["OwlViTFeatureExtractor"] UpperCamelCase_ = ["OwlViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "OwlViTModel", "OwlViTPreTrainedModel", "OwlViTTextModel", "OwlViTVisionModel", "OwlViTForObjectDetection", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A ) def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet( A, A, A, A, A, A, A, A, A, A, A, ) # merge samples if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample else: SCREAMING_SNAKE_CASE : str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(A, A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = save_directory for controlnet in self.nets: controlnet.save_pretrained( A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, ) idx += 1 SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}" @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path while os.path.isdir(A ): SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A ) controlnets.append(A ) idx += 1 SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}" logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." ) if len(A ) == 0: raise ValueError( F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(A )
28
1
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowercase__( __UpperCamelCase: Dict ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = image.size SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 SCREAMING_SNAKE_CASE : Optional[int] = image.resize((w, h) ,resample=PIL_INTERPOLATION['lanczos'] ) SCREAMING_SNAKE_CASE : Optional[Any] = np.array(__UpperCamelCase ).astype(np.floataa ) / 2_5_5.0 SCREAMING_SNAKE_CASE : int = image[None].transpose(0 ,3 ,1 ,2 ) SCREAMING_SNAKE_CASE : int = torch.from_numpy(__UpperCamelCase ) return 2.0 * image - 1.0 class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A, A, A, ): '''simple docstring''' super().__init__() self.register_modules(vqvae=A, unet=A, scheduler=A ) @torch.no_grad() def __call__( self, A = None, A = 1, A = 100, A = 0.0, A = None, A = "pil", A = True, ): '''simple docstring''' if isinstance(A, PIL.Image.Image ): SCREAMING_SNAKE_CASE : int = 1 elif isinstance(A, torch.Tensor ): SCREAMING_SNAKE_CASE : Optional[Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(A )}" ) if isinstance(A, PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[str] = preprocess(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image SCREAMING_SNAKE_CASE : Tuple = (batch_size, self.unet.config.in_channels // 2, height, width) SCREAMING_SNAKE_CASE : Any = next(self.unet.parameters() ).dtype SCREAMING_SNAKE_CASE : int = randn_tensor(A, generator=A, device=self.device, dtype=A ) SCREAMING_SNAKE_CASE : Optional[Any] = image.to(device=self.device, dtype=A ) # set timesteps and move to the correct device self.scheduler.set_timesteps(A, device=self.device ) SCREAMING_SNAKE_CASE : str = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE : List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE : str = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) SCREAMING_SNAKE_CASE : Tuple = {} if accepts_eta: SCREAMING_SNAKE_CASE : Any = eta for t in self.progress_bar(A ): # concat latents and low resolution image in the channel dimension. 
SCREAMING_SNAKE_CASE : List[str] = torch.cat([latents, image], dim=1 ) SCREAMING_SNAKE_CASE : int = self.scheduler.scale_model_input(A, A ) # predict the noise residual SCREAMING_SNAKE_CASE : List[str] = self.unet(A, A ).sample # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : str = self.scheduler.step(A, A, A, **A ).prev_sample # decode the image latents with the VQVAE SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(A ).sample SCREAMING_SNAKE_CASE : Dict = torch.clamp(A, -1.0, 1.0 ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 SCREAMING_SNAKE_CASE : List[str] = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Union[str, Any] = self.numpy_to_pil(A ) if not return_dict: return (image,) return ImagePipelineOutput(images=A )
28
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
28
1
'''simple docstring''' import warnings from functools import wraps from typing import Callable def lowercase__( __UpperCamelCase: Callable ): """simple docstring""" @wraps(__UpperCamelCase ) def _inner_fn(*__UpperCamelCase: List[Any] ,**__UpperCamelCase: Any ): warnings.warn( (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") ,__UpperCamelCase ,) return fn(*__UpperCamelCase ,**__UpperCamelCase ) return _inner_fn
28
'''simple docstring''' from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841 SCREAMING_SNAKE_CASE : Optional[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(__UpperCamelCase ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) SCREAMING_SNAKE_CASE : Dict = mst(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] ) SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] ) assert edge in result or reverse in result
28
1
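The decorator row in the block above wraps a function so that every call emits a warning before delegating to the original. A self-contained sketch of the same pattern (names are hypothetical, and the warning category here is an assumption):

import warnings
from functools import wraps

def experimental(fn):
    # wrap fn so each call warns, then delegates unchanged
    @wraps(fn)
    def _inner(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to "
            "breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner

@experimental
def new_api(x):
    return x * 2

print(new_api(3))  # prints 6, after emitting a UserWarning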
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowercase__( __UpperCamelCase: Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = VideoMAEConfig() set_architecture_configs(__UpperCamelCase ,__UpperCamelCase ) if "finetuned" not in model_name: SCREAMING_SNAKE_CASE : Optional[int] = False if "finetuned" in model_name: SCREAMING_SNAKE_CASE : Optional[Any] = 'huggingface/label-files' if "kinetics" in model_name: SCREAMING_SNAKE_CASE : Tuple = 4_00 SCREAMING_SNAKE_CASE : str = 'kinetics400-id2label.json' elif "ssv2" in model_name: SCREAMING_SNAKE_CASE : Union[str, Any] = 1_74 SCREAMING_SNAKE_CASE : Union[str, Any] = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) ) SCREAMING_SNAKE_CASE : Any = {int(__UpperCamelCase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[int] = idalabel SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()} return config def lowercase__( __UpperCamelCase: List[str] ,__UpperCamelCase: Any ): """simple docstring""" if "small" in model_name: SCREAMING_SNAKE_CASE : Tuple = 3_84 SCREAMING_SNAKE_CASE : int = 15_36 SCREAMING_SNAKE_CASE : int = 12 SCREAMING_SNAKE_CASE : Optional[Any] = 16 SCREAMING_SNAKE_CASE : int = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 3 SCREAMING_SNAKE_CASE : Optional[Any] = 1_92 SCREAMING_SNAKE_CASE : int = 7_68 elif "large" in model_name: SCREAMING_SNAKE_CASE : Any = 10_24 SCREAMING_SNAKE_CASE : Optional[Any] = 40_96 SCREAMING_SNAKE_CASE : Optional[Any] = 24 SCREAMING_SNAKE_CASE : Optional[int] = 16 SCREAMING_SNAKE_CASE : List[str] = 12 SCREAMING_SNAKE_CASE : Any = 8 SCREAMING_SNAKE_CASE : Optional[Any] = 5_12 SCREAMING_SNAKE_CASE : Optional[int] = 20_48 elif "huge" in model_name: SCREAMING_SNAKE_CASE : Optional[int] = 12_80 SCREAMING_SNAKE_CASE : List[str] = 51_20 SCREAMING_SNAKE_CASE : Tuple = 32 SCREAMING_SNAKE_CASE : List[Any] = 16 SCREAMING_SNAKE_CASE : Any = 12 SCREAMING_SNAKE_CASE : str = 8 SCREAMING_SNAKE_CASE : Union[str, Any] = 6_40 SCREAMING_SNAKE_CASE : Tuple = 25_60 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def lowercase__( __UpperCamelCase: str ): """simple docstring""" if "encoder." in name: SCREAMING_SNAKE_CASE : int = name.replace('encoder.' 
,'' ) if "cls_token" in name: SCREAMING_SNAKE_CASE : Any = name.replace('cls_token' ,'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: SCREAMING_SNAKE_CASE : List[Any] = name.replace('decoder_pos_embed' ,'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: SCREAMING_SNAKE_CASE : Any = name.replace('pos_embed' ,'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE : Any = name.replace('patch_embed.proj' ,'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE : Any = name.replace('patch_embed.norm' ,'videomae.embeddings.norm' ) if "decoder.blocks" in name: SCREAMING_SNAKE_CASE : List[str] = name.replace('decoder.blocks' ,'decoder.decoder_layers' ) if "blocks" in name: SCREAMING_SNAKE_CASE : Any = name.replace('blocks' ,'videomae.encoder.layer' ) if "attn.proj" in name: SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('attn.proj' ,'attention.output.dense' ) if "attn" in name and "bias" not in name: SCREAMING_SNAKE_CASE : List[str] = name.replace('attn' ,'attention.self' ) if "attn" in name: SCREAMING_SNAKE_CASE : str = name.replace('attn' ,'attention.attention' ) if "norm1" in name: SCREAMING_SNAKE_CASE : str = name.replace('norm1' ,'layernorm_before' ) if "norm2" in name: SCREAMING_SNAKE_CASE : Dict = name.replace('norm2' ,'layernorm_after' ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE : Tuple = name.replace('mlp.fc1' ,'intermediate.dense' ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE : Dict = name.replace('mlp.fc2' ,'output.dense' ) if "decoder_embed" in name: SCREAMING_SNAKE_CASE : Any = name.replace('decoder_embed' ,'decoder.decoder_embed' ) if "decoder_norm" in name: SCREAMING_SNAKE_CASE : Optional[int] = name.replace('decoder_norm' ,'decoder.decoder_norm' ) if "decoder_pred" in name: SCREAMING_SNAKE_CASE : List[Any] = name.replace('decoder_pred' ,'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: SCREAMING_SNAKE_CASE : List[str] = name.replace('norm.weight' ,'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: SCREAMING_SNAKE_CASE : List[str] = name.replace('norm.bias' ,'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: SCREAMING_SNAKE_CASE : Any = name.replace('head' ,'classifier' ) return name def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ): """simple docstring""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE : int = orig_state_dict.pop(__UpperCamelCase ) if key.startswith('encoder.' ): SCREAMING_SNAKE_CASE : List[str] = key.replace('encoder.' ,'' ) if "qkv" in key: SCREAMING_SNAKE_CASE : Optional[Any] = key.split('.' ) if key.startswith('decoder.blocks' ): SCREAMING_SNAKE_CASE : Any = config.decoder_hidden_size SCREAMING_SNAKE_CASE : str = int(key_split[2] ) SCREAMING_SNAKE_CASE : Dict = 'decoder.decoder_layers.' if "weight" in key: SCREAMING_SNAKE_CASE : int = val[:dim, :] SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE : str = val[-dim:, :] else: SCREAMING_SNAKE_CASE : Optional[int] = config.hidden_size SCREAMING_SNAKE_CASE : Optional[int] = int(key_split[1] ) SCREAMING_SNAKE_CASE : Tuple = 'videomae.encoder.layer.' 
if "weight" in key: SCREAMING_SNAKE_CASE : Any = val[:dim, :] SCREAMING_SNAKE_CASE : Union[str, Any] = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:, :] else: SCREAMING_SNAKE_CASE : List[Any] = val return orig_state_dict def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' ,filename='eating_spaghetti.npy' ,repo_type='dataset' ) SCREAMING_SNAKE_CASE : Optional[Any] = np.load(__UpperCamelCase ) return list(__UpperCamelCase ) def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Dict ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = get_videomae_config(__UpperCamelCase ) if "finetuned" in model_name: SCREAMING_SNAKE_CASE : Tuple = VideoMAEForVideoClassification(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[Any] = VideoMAEForPreTraining(__UpperCamelCase ) # download original checkpoint, hosted on Google Drive SCREAMING_SNAKE_CASE : List[str] = 'pytorch_model.bin' gdown.cached_download(__UpperCamelCase ,__UpperCamelCase ,quiet=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Dict = torch.load(__UpperCamelCase ,map_location='cpu' ) if "model" in files: SCREAMING_SNAKE_CASE : Optional[int] = files['model'] else: SCREAMING_SNAKE_CASE : Optional[Any] = files['module'] SCREAMING_SNAKE_CASE : Optional[int] = convert_state_dict(__UpperCamelCase ,__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() # verify model on basic input SCREAMING_SNAKE_CASE : Tuple = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_video() SCREAMING_SNAKE_CASE : Any = image_processor(__UpperCamelCase ,return_tensors='pt' ) if "finetuned" not in model_name: SCREAMING_SNAKE_CASE : int = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' ,filename='bool_masked_pos.pt' ) SCREAMING_SNAKE_CASE : List[Any] = torch.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = model(**__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits SCREAMING_SNAKE_CASE : int = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([1, 4_00] ) SCREAMING_SNAKE_CASE : int = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] ) elif model_name == "videomae-small-finetuned-ssv2": SCREAMING_SNAKE_CASE : Any = torch.Size([1, 1_74] ) SCREAMING_SNAKE_CASE : str = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] ) elif model_name == "videomae-base": SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 14_08, 15_36] ) SCREAMING_SNAKE_CASE : Any = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] ) elif model_name == "videomae-base-short": SCREAMING_SNAKE_CASE : 
Optional[Any] = torch.Size([1, 14_08, 15_36] ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ) # we verified the loss both for normalized and unnormalized targets for this one SCREAMING_SNAKE_CASE : str = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] ) elif model_name == "videomae-large": SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 14_08, 15_36] ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] ) elif model_name == "videomae-large-finetuned-kinetics": SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 4_00] ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] ) elif model_name == "videomae-huge-finetuned-kinetics": SCREAMING_SNAKE_CASE : int = torch.Size([1, 4_00] ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] ) elif model_name == "videomae-base-short-finetuned-kinetics": SCREAMING_SNAKE_CASE : Any = torch.Size([1, 4_00] ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] ) elif model_name == "videomae-base-finetuned-kinetics": SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 4_00] ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ) elif model_name == "videomae-base-short-ssv2": SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 14_08, 15_36] ) SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] ) elif model_name == "videomae-base-short-finetuned-ssv2": SCREAMING_SNAKE_CASE : Any = torch.Size([1, 1_74] ) SCREAMING_SNAKE_CASE : Any = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] ) elif model_name == "videomae-base-ssv2": SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([1, 14_08, 15_36] ) SCREAMING_SNAKE_CASE : str = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] ) elif model_name == "videomae-base-finetuned-ssv2": SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 1_74] ) SCREAMING_SNAKE_CASE : int = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] ) else: raise ValueError(f"Model name not supported. Should be one of {model_names}" ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) else: print('Logits:' ,logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": SCREAMING_SNAKE_CASE : Optional[Any] = outputs.loss assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(f"Saving model and image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if push_to_hub: print('Pushing to the hub...' 
) model.push_to_hub(__UpperCamelCase ,organization='nielsr' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase_ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
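# A minimal standalone sketch (not part of the conversion script above) of the qkv-splitting
# convention convert_state_dict relies on: the original VideoMAE checkpoint stores query, key
# and value as one fused "qkv" matrix of shape (3 * dim, dim), and the script slices it into
# thirds with val[:dim], val[dim : dim * 2] and val[-dim:]. The dim value here is an
# illustrative assumption.
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query = qkv_weight[:dim, :]         # first third  -> query weight
key = qkv_weight[dim : dim * 2, :]  # middle third -> key weight
value = qkv_weight[-dim:, :]        # last third   -> value weight
# Concatenating the three slices reproduces the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)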
28
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs(expected_image - image ).max() < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs(expected_image - image ).max() < 5E-1
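# The image comparisons in these slow tests reduce the per-pixel difference to a single
# scalar before checking it against a tolerance; this tiny standalone demo shows why abs()
# must be applied before max(): a large negative discrepancy can otherwise hide behind a
# small positive one.
import numpy as np

a = np.array([0.0, 0.0])
b = np.array([0.9, -0.01])
assert abs(float((a - b).max())) == 0.01  # abs-of-max misses the 0.9 discrepancy
assert float(np.abs(a - b).max()) == 0.9  # max-of-abs reports it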
28
1
'''simple docstring''' import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Optional[int] = CpmAntTokenizer A : Dict = False def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE : List[str] = [ '<d>', '</d>', '<s>', '</s>', '</_>', '<unk>', '<pad>', '</n>', '我', '是', 'C', 'P', 'M', 'A', 'n', 't', ] SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) @tooslow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' ) SCREAMING_SNAKE_CASE : Tuple = '今天天气真好!' SCREAMING_SNAKE_CASE : Tuple = ['今天', '天气', '真', '好', '!'] SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(A ) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : int = '今天天气真好!' SCREAMING_SNAKE_CASE : Dict = [tokenizer.bos_token] + tokens SCREAMING_SNAKE_CASE : List[Any] = [6, 9_802, 14_962, 2_082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(A ) self.assertEqual(A, A )
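# A small self-contained sketch (token list and file name are illustrative, not the full
# CpmAnt vocabulary) of the one-token-per-line vocab format the test above writes: the line
# number is the token id, so loading the file back is a simple enumerate.
import os
import tempfile

vocab_tokens = ["<d>", "</d>", "<s>", "</s>", "<unk>", "<pad>", "我", "是"]
vocab_file = os.path.join(tempfile.mkdtemp(), "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as writer:
    writer.write("".join(token + "\n" for token in vocab_tokens))

with open(vocab_file, encoding="utf-8") as reader:
    token_to_id = {line.rstrip("\n"): idx for idx, line in enumerate(reader)}
assert token_to_id["<s>"] == 2 and token_to_id["我"] == 6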
28
'''simple docstring''' def lowercase__( __UpperCamelCase: int = 1_00_00_00 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )] for i in range(2 ,limit + 1 ): if phi[i] == i - 1: for j in range(2 * i ,limit + 1 ,__UpperCamelCase ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
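# solution() above sums phi(2..limit) with a prime sieve that seeds phi[i] = i - 1; this
# standalone variant (seeded with phi[i] = i instead) makes the underlying product formula
# phi(n) = n * prod over primes p dividing n of (1 - 1/p) explicit, and checks the small
# case limit = 10 by hand.
def phi_sieve(limit: int) -> list[int]:
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p is prime: no smaller prime has reduced it yet
            for multiple in range(p, limit + 1, p):
                phi[multiple] -= phi[multiple] // p  # multiply by (1 - 1/p)
    return phi

# phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4 -> sum 31
assert sum(phi_sieve(10)[2:]) == 31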
28
1
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = "▁" UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Dict = BigBirdTokenizer A : Any = BigBirdTokenizerFast A : Optional[int] = True A : Any = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class(A, keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = '<s>' SCREAMING_SNAKE_CASE : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ), A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '<unk>' ) self.assertEqual(vocab_keys[1], '<s>' ) self.assertEqual(vocab_keys[-1], '[MASK]' ) self.assertEqual(len(A ), 1_004 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1_000 ) def UpperCamelCase_ ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : List[str] = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(A ) SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(A ) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(A, add_special_tokens=A ) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Dict = rust_tokenizer.encode(A ) self.assertListEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = BigBirdTokenizer(A, keep_accents=A ) SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('This is a test' ) self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ), [285, 46, 10, 170, 382], ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( A, [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ], ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A, [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ], ) @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 'Hello World!' SCREAMING_SNAKE_CASE : int = [65, 18_536, 2_260, 101, 66] self.assertListEqual(A, self.big_tokenizer.encode(A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) # fmt: off SCREAMING_SNAKE_CASE : int = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(A, self.big_tokenizer.encode(A ) ) @require_torch @slow def UpperCamelCase_ ( self ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence SCREAMING_SNAKE_CASE : str = list(self.big_tokenizer.get_vocab().keys() )[:10] SCREAMING_SNAKE_CASE : Any = ' '.join(A ) SCREAMING_SNAKE_CASE : str = self.big_tokenizer.encode_plus(A, return_tensors='pt', return_token_type_ids=A ) SCREAMING_SNAKE_CASE : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[int] = BigBirdConfig(attention_type='original_full' ) SCREAMING_SNAKE_CASE : Optional[int] = BigBirdModel(A ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**A ) model(**A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(tokenizer('Paris is the [MASK].' 
).input_ids ) self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {'input_ids': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A, model_name='google/bigbird-roberta-base', revision='215c99f1600e06f83acce68422f2035b2b5c3510', )
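# A standalone illustration (no sentencepiece dependency) of the "▁" (U+2581) convention the
# BigBird assertions above rely on: SentencePiece marks each word-initial piece with the
# underline character, so detokenization is plain concatenation plus one replace.
SPIECE_UNDERLINE = "\u2581"
pieces = [SPIECE_UNDERLINE + "This", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "a", SPIECE_UNDERLINE + "t", "est"]
text = "".join(pieces).replace(SPIECE_UNDERLINE, " ").lstrip()
assert text == "This is a test"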
28
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
28
1
'''simple docstring''' import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=64, A=3, A=3, A=2, A=1, A=16, A=[128, 256, 384], A=[4, 6, 8], A=[2, 3, 4], A=[16, 16, 16], A=0, A=[2, 2, 2], A=[2, 2, 2], A=0.02, A=True, A=True, A=2, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = parent SCREAMING_SNAKE_CASE : Optional[Any] = batch_size SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : List[Any] = kernel_size SCREAMING_SNAKE_CASE : Tuple = stride SCREAMING_SNAKE_CASE : Optional[int] = padding SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Optional[Any] = depths SCREAMING_SNAKE_CASE : Tuple = key_dim SCREAMING_SNAKE_CASE : List[Any] = drop_path_rate SCREAMING_SNAKE_CASE : Any = patch_size SCREAMING_SNAKE_CASE : Any = attention_ratio SCREAMING_SNAKE_CASE : List[str] = mlp_ratio SCREAMING_SNAKE_CASE : int = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE : str = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_labels SCREAMING_SNAKE_CASE : Optional[Any] = num_labels SCREAMING_SNAKE_CASE : List[Any] = initializer_range def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : str = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): '''simple docstring''' return LevitConfig( image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, ) def UpperCamelCase_ ( self, A, A, A ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = LevitModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A ) SCREAMING_SNAKE_CASE : int = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) SCREAMING_SNAKE_CASE : int = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), ) def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.num_labels SCREAMING_SNAKE_CASE : Optional[int] = LevitForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Tuple = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = config_and_inputs SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Dict = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) A : Optional[int] = ( { '''feature-extraction''': LevitModel, '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) A : Optional[Any] = False A : List[Any] = False A : str = False A : Dict = False A : Dict = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = LevitModelTester(self ) SCREAMING_SNAKE_CASE : Dict = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self ): '''simple docstring''' return @unittest.skip(reason='Levit does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='Levit does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='Levit does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) def 
UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.hidden_states SCREAMING_SNAKE_CASE : Dict = len(self.model_tester.depths ) + 1 self.assertEqual(len(A ), A ) SCREAMING_SNAKE_CASE : List[str] = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE : List[str] = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) SCREAMING_SNAKE_CASE : str = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [ height * width, self.model_tester.hidden_sizes[0], ], ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self, A, A, A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = super()._prepare_for_class(A, A, return_labels=A ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE : Dict = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(A ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.train() SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(A, A, return_labels=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(**A ).loss loss.backward() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = True for model_class in self.all_model_classes: if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == 
"LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) model.gradient_checkpointing_enable() model.to(A ) model.train() SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(A, A, return_labels=A ) SCREAMING_SNAKE_CASE : Any = model(**A ).loss loss.backward() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE : Optional[Any] = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(A ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ): SCREAMING_SNAKE_CASE : List[str] = problem_type['title'] SCREAMING_SNAKE_CASE : Tuple = problem_type['num_labels'] SCREAMING_SNAKE_CASE : Tuple = model_class(A ) model.to(A ) model.train() SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(A, A, return_labels=A ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE : List[Any] = inputs['labels'].unsqueeze(1 ).repeat(1, problem_type['num_labels'] ) SCREAMING_SNAKE_CASE : Optional[int] = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=A ) as warning_list: SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : List[str] = LevitModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( A ) SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : int = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
28
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = StableDiffusionXLImgaImgPipeline A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} A : str = PipelineTesterMixin.required_optional_params - {'''latents'''} A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, ) SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler( beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, ) SCREAMING_SNAKE_CASE : int = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A ) SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 
'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) # forward without prompt embeds SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']] SCREAMING_SNAKE_CASE : int = sd_pipe(**A ) SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )] ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe( **A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, ) SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A ) SCREAMING_SNAKE_CASE : str = pipe(**A ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
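The dummy-input helper in the test above seeds its noise generator differently per backend, since torch.Generator objects cannot be created on "mps". A minimal sketch of that pattern; the helper name make_generator is ours, not from the test:

import torch


def make_generator(device: str, seed: int) -> torch.Generator:
    # On "mps" fall back to the global CPU generator via torch.manual_seed,
    # as the test does; elsewhere build and seed a device-local generator.
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


generator = make_generator("cpu", seed=0)
noise = torch.randn((1, 3, 32, 32), generator=generator)  # reproducible draw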
28
1
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCamelCase_ = pytest.mark.integration UpperCamelCase_ = {"comet"} UpperCamelCase_ = importlib.util.find_spec("fairseq") is not None UpperCamelCase_ = {"code_eval"} UpperCamelCase_ = os.name == "nt" UpperCamelCase_ = {"bertscore", "frugalscore", "perplexity"} UpperCamelCase_ = importlib.util.find_spec("transformers") is not None def lowercase__( __UpperCamelCase: Tuple ): """simple docstring""" @wraps(__UpperCamelCase ) def wrapper(self: Optional[Any] ,__UpperCamelCase: Tuple ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self ,__UpperCamelCase ) return wrapper def lowercase__( __UpperCamelCase: str ): """simple docstring""" @wraps(__UpperCamelCase ) def wrapper(self: Any ,__UpperCamelCase: Optional[int] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self ,__UpperCamelCase ) return wrapper def lowercase__( __UpperCamelCase: Optional[int] ): """simple docstring""" @wraps(__UpperCamelCase ) def wrapper(self: Optional[int] ,__UpperCamelCase: Tuple ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self ,__UpperCamelCase ) return wrapper def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @local class _a ( parameterized.TestCase ): '''simple docstring''' A : Any = {} A : List[Any] = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = '[...]' SCREAMING_SNAKE_CASE : List[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics', A ) ).module_path ) SCREAMING_SNAKE_CASE : int = datasets.load.import_main_class(metric_module.__name__, dataset=A ) # check parameters SCREAMING_SNAKE_CASE : int = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(A, metric_module.__name__ ): with self.use_local_metrics(): try: SCREAMING_SNAKE_CASE : Any = doctest.testmod(A, verbose=A, raise_on_error=A ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed, 0 ) self.assertGreater(results.attempted, 1 ) @slow def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = '[...]' SCREAMING_SNAKE_CASE : str = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics', A ) ).module_path ) # run doctest with self.use_local_metrics(): 
SCREAMING_SNAKE_CASE : Tuple = doctest.testmod(A, verbose=A, raise_on_error=A ) self.assertEqual(results.failed, 0 ) self.assertGreater(results.attempted, 1 ) @contextmanager def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](A ): yield else: yield @contextmanager def UpperCamelCase_ ( self ): '''simple docstring''' def load_local_metric(A, *A, **A ): return load_metric(os.path.join('metrics', A ), *A, **A ) with patch('datasets.load_metric' ) as mock_load_metric: SCREAMING_SNAKE_CASE : Tuple = load_local_metric yield @classmethod def UpperCamelCase_ ( cls, A ): '''simple docstring''' def wrapper(A ): SCREAMING_SNAKE_CASE : str = contextmanager(A ) SCREAMING_SNAKE_CASE : int = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowercase__( __UpperCamelCase: Union[str, Any] ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' ,'' ,'' ) # handle pytest cli flags class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self, A ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: SCREAMING_SNAKE_CASE : Optional[Any] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowercase__( __UpperCamelCase: Union[str, Any] ): """simple docstring""" import torch def bert_cos_score_idf(__UpperCamelCase: int ,__UpperCamelCase: List[Any] ,*__UpperCamelCase: Optional[Any] ,**__UpperCamelCase: Dict ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCamelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: SCREAMING_SNAKE_CASE : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowercase__( __UpperCamelCase: Dict ): """simple docstring""" def load_from_checkpoint(__UpperCamelCase: Any ): class _a : '''simple docstring''' def UpperCamelCase_ ( self, A, *A, **A ): '''simple docstring''' assert len(A ) == 2 SCREAMING_SNAKE_CASE : Optional[int] = [0.19, 0.92] return scores, sum(A ) / len(A ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: SCREAMING_SNAKE_CASE : Any = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: SCREAMING_SNAKE_CASE : Dict = load_from_checkpoint yield def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = load_metric(os.path.join('metrics' ,'seqeval' ) ) SCREAMING_SNAKE_CASE : List[Any] = 'ERROR' SCREAMING_SNAKE_CASE : Union[str, Any] = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}" with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ): metric.compute(predictions=[] ,references=[] ,scheme=__UpperCamelCase )
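The harness above keeps metric doctests cheap by swapping each heavy model call for a stub inside a registered context manager. A self-contained sketch of that patching idea using only the standard library; the Scorer class is a hypothetical stand-in for a real metric backend:

from contextlib import contextmanager
from unittest.mock import patch


class Scorer:
    """Hypothetical wrapper around an expensive model call."""

    def predict(self, batch):
        raise RuntimeError("would download weights and run a forward pass")


@contextmanager
def patch_intensive_calls(scorer: Scorer):
    # Replace the heavy method with a cheap deterministic stub, mirroring
    # the INTENSIVE_CALLS_PATCHER context managers registered above.
    with patch.object(scorer, "predict", side_effect=lambda batch: [0.0] * len(batch)):
        yield


scorer = Scorer()
with patch_intensive_calls(scorer):
    assert scorer.predict(["ref", "hyp"]) == [0.0, 0.0]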
28
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
28
1
'''simple docstring'''


def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    """Print a single disk move."""
    print('moving disk from', from_pole, 'to', to_pole)


def main() -> None:
    """Read the tower height from stdin and solve the puzzle."""
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
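A quick usage check for the functions above; moving n disks always prints 2**n - 1 moves:

# For height 3 this prints 7 moves, beginning with
# "moving disk from A to B" and ending with every disk on pole B.
move_tower(3, "A", "B", "C")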
28
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("transformers.models.speecht5") def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ): """simple docstring""" hf_model.apply_weight_norm() SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v'] SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"] SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v'] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig() SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase ) load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float() SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float() model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) UpperCamelCase_ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
28
1
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
28
'''simple docstring''' from typing import Any class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = data SCREAMING_SNAKE_CASE : Any = None def __repr__( self ): '''simple docstring''' return F"Node({self.data})" class _a : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None def __iter__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.head while node: yield node.data SCREAMING_SNAKE_CASE : List[str] = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(A ) for item in self] ) def __getitem__( self, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, A, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(A ): SCREAMING_SNAKE_CASE : Union[str, Any] = current.next SCREAMING_SNAKE_CASE : Any = data def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(len(self ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(0, A ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A ) if self.head is None: SCREAMING_SNAKE_CASE : Optional[int] = new_node elif index == 0: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head SCREAMING_SNAKE_CASE : Tuple = new_node else: SCREAMING_SNAKE_CASE : Optional[int] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : str = temp.next SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next SCREAMING_SNAKE_CASE : List[str] = new_node def UpperCamelCase_ ( self ): # print every node data '''simple docstring''' print(self ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCamelCase_ ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCamelCase_ ( self, A = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node if index == 0: SCREAMING_SNAKE_CASE : List[str] = self.head.next else: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Any = temp.next SCREAMING_SNAKE_CASE : List[str] = temp.next SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next return delete_node.data def UpperCamelCase_ ( self ): '''simple docstring''' return self.head is None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Any = self.head while current: # Store the current node's next node. 
SCREAMING_SNAKE_CASE : Optional[int] = current.next # Make the current node's next point backwards SCREAMING_SNAKE_CASE : int = prev # Make the previous node be the current node SCREAMING_SNAKE_CASE : int = current # Make the current node the next node (to progress iteration) SCREAMING_SNAKE_CASE : List[Any] = next_node # Return prev in order to put the head at the end SCREAMING_SNAKE_CASE : List[Any] = prev def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = LinkedList() assert linked_list.is_empty() is True assert str(__UpperCamelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(__UpperCamelCase ) == i linked_list.insert_nth(__UpperCamelCase ,i + 1 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__UpperCamelCase ) == 9 assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True for i in range(0 ,9 ): SCREAMING_SNAKE_CASE : Any = -i assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True linked_list.reverse() assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_9_2.5_5_5_5_5, 'Hello, world!', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] SCREAMING_SNAKE_CASE : Optional[int] = LinkedList() for i in test_input: linked_list.insert_tail(__UpperCamelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head SCREAMING_SNAKE_CASE : str = linked_list.delete_head() assert result == -9 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 ) assert result is None assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__UpperCamelCase ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__UpperCamelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__( ): """simple docstring""" from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Dict = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(__UpperCamelCase ) print('\nReading/changing Node data using indexing:' ) print(f"Element at Position 1: {linked_list[1]}" ) SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip() print('New list:' ) print(__UpperCamelCase ) print(f"length of linked_list is : {len(__UpperCamelCase )}" ) if __name__ == "__main__": main()
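The reverse method in the linked-list sample above uses the classic three-pointer walk. A standalone restatement of that logic which can be run directly:

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


def reverse(head):
    # Remember the next node, flip the link, advance: the same three-pointer
    # walk as LinkedList.reverse above. Returns the new head (the old tail).
    prev = None
    current = head
    while current:
        next_node = current.next
        current.next = prev
        prev = current
        current = next_node
    return prev


# Build 1 -> 2 -> 3, reverse it, and read it back as 3 -> 2 -> 1.
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head = reverse(head)
values = []
while head:
    values.append(head.data)
    head = head.next
assert values == [3, 2, 1]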
28
1
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        h = self.post_quant_conv(quant)
        dec = self.decoder(h, quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
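A small encode/decode round trip exercising the model above: decode quantizes by default (nearest-codebook lookup) unless force_not_quantize=True. This is a sketch that assumes a recent diffusers release; the sizes are illustrative only:

import torch
from diffusers import VQModel  # assumes diffusers is installed

# Tiny illustrative config; a single down block keeps the spatial size at 32.
model = VQModel(block_out_channels=(32,), latent_channels=4, sample_size=32)
model.eval()

x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(x).latents     # pre-quantization latents
    recon = model.decode(latents).sample  # codebook lookup, then decode
assert recon.shape == x.shape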
28
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
28
1
'''simple docstring''' from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging UpperCamelCase_ = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A = 101 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = length def __len__( self ): '''simple docstring''' return self.length def __getitem__( self, A ): '''simple docstring''' return i class _a : '''simple docstring''' def __call__( self, A ): '''simple docstring''' return {"input_ids": torch.tensor(A ), "labels": torch.tensor(A )} class _a ( nn.Module ): '''simple docstring''' def __init__( self ): '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. SCREAMING_SNAKE_CASE : List[str] = nn.Linear(120, 80 ) def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' if labels is not None: return torch.tensor(0.0, device=input_ids.device ), input_ids else: return input_ids class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @require_torch_neuroncore def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = F"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() SCREAMING_SNAKE_CASE : Tuple = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE : str = F"--output_dir {output_dir}".split() SCREAMING_SNAKE_CASE : Dict = ['torchrun'] + distributed_args + args execute_subprocess_async(A, env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @require_torch_multi_gpu def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = F"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() SCREAMING_SNAKE_CASE : Optional[int] = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE : Optional[int] = F"--output_dir {output_dir}".split() SCREAMING_SNAKE_CASE : Optional[int] = ['torchrun'] + distributed_args + args execute_subprocess_async(A, env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py UpperCamelCase_ = HfArgumentParser((TrainingArguments,)) UpperCamelCase_ = parser.parse_args_into_dataclasses()[0] logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [1_0_1, 4_0, 7]: UpperCamelCase_ = DummyDataset(dataset_length) def lowercase__( __UpperCamelCase: EvalPrediction ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = list(range(len(__UpperCamelCase ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( 'Predictions and/or labels do not match expected results:\n - predictions: ' f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} UpperCamelCase_ = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) UpperCamelCase_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) UpperCamelCase_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) UpperCamelCase_ = 2 UpperCamelCase_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) UpperCamelCase_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) UpperCamelCase_ = None
28
'''simple docstring'''
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, "
                f"but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, "
                f"but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
28
1
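The two combinators above are the source of `interleave_datasets` and `concatenate_datasets`. A minimal usage sketch, assuming the public `datasets` package is installed; the toy column name "a" is illustrative only:

from datasets import Dataset, concatenate_datasets, interleave_datasets

# Two small in-memory map-style datasets.
d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

# Sample rows from both datasets at the given probabilities; with the default
# "first_exhausted" strategy, iteration stops once one dataset runs out.
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)

# Stack the rows of both datasets along the default axis 0.
combined = concatenate_datasets([d1, d2])

print(mixed["a"])     # interleaved values from both datasets
print(combined["a"])  # [0, 1, 2, 10, 11, 12]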
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Optional[Any] = MgpstrTokenizer A : Any = False A : Any = {} A : List[str] = False def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on SCREAMING_SNAKE_CASE : List[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'tester' SCREAMING_SNAKE_CASE : List[Any] = 'tester' return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=A ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE : Union[str, Any] = '[SPECIAL_TOKEN]' tokenizer.add_special_tokens({'cls_token': special_token} ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode([special_token], add_special_tokens=A ) self.assertEqual(len(A ), 1 ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(A, skip_special_tokens=A ) self.assertTrue(special_token not in decoded ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.get_input_output_texts(A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(A, add_special_tokens=A ) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(A ) self.assertNotEqual(len(A ), 0 ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(A ) self.assertIsInstance(A, A ) self.assertEqual(text_a.replace(' ', '' ), A ) @unittest.skip('MGP-STR tokenizer only handles one sequence.' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass
28
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
28
1
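The MobileViT tests above exercise the classification and segmentation heads against the `apple/mobilevit-xx-small` checkpoints. A minimal inference sketch along the same lines, assuming `torch`, `transformers`, and `Pillow` are installed; the local file name "cat.png" is a placeholder:

import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")

image = Image.open("cat.png").convert("RGB")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test above

predicted = int(logits.argmax(-1))
print(model.config.id2label[predicted])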
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=3, A=4, A=[10, 20, 30, 40], A=[2, 2, 3, 2], A=True, A=True, A=37, A="gelu", A=10, A=0.02, A=["stage2", "stage3", "stage4"], A=3, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = parent SCREAMING_SNAKE_CASE : str = batch_size SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = num_stages SCREAMING_SNAKE_CASE : List[str] = hidden_sizes SCREAMING_SNAKE_CASE : Optional[Any] = depths SCREAMING_SNAKE_CASE : Dict = is_training SCREAMING_SNAKE_CASE : Dict = use_labels SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size SCREAMING_SNAKE_CASE : Any = initializer_range SCREAMING_SNAKE_CASE : List[Any] = out_features SCREAMING_SNAKE_CASE : Any = num_labels SCREAMING_SNAKE_CASE : Union[str, Any] = scope SCREAMING_SNAKE_CASE : Any = num_stages def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : int = None if self.use_labels: SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=A, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=A, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = UperNetForSemanticSegmentation(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Any = model(A ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Dict = config_and_inputs SCREAMING_SNAKE_CASE : Union[str, Any] = {'pixel_values': 
pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = (UperNetForSemanticSegmentation,) if is_torch_available() else () A : Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {} A : List[Any] = False A : Tuple = False A : Optional[Any] = False A : str = False A : List[str] = False A : int = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = UperNetModelTester(self ) SCREAMING_SNAKE_CASE : Any = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self ): '''simple docstring''' return def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[Any] = model_class(A ) SCREAMING_SNAKE_CASE : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : str = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.num_stages self.assertEqual(len(A ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[str] = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Any = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE : Any = _config_zero_init(A ) SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : str = model_class(config=A ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason='UperNet does not have tied weights' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Dict = UperNetForSemanticSegmentation.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' ,repo_type='dataset' ,filename='ADE_val_00000001.jpg' ) SCREAMING_SNAKE_CASE : Optional[int] = Image.open(__UpperCamelCase ).convert('RGB' ) return image @require_torch @require_vision @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) SCREAMING_SNAKE_CASE : Any = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(A ) SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : str = processor(images=A, return_tensors='pt' ).to(A ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = model(**A ) SCREAMING_SNAKE_CASE : str = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], A, atol=1E-4 ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) SCREAMING_SNAKE_CASE : Dict = 
UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(A ) SCREAMING_SNAKE_CASE : Tuple = prepare_img() SCREAMING_SNAKE_CASE : int = processor(images=A, return_tensors='pt' ).to(A ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = model(**A ) SCREAMING_SNAKE_CASE : Any = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : Any = torch.tensor( [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], A, atol=1E-4 ) )
28
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) )
            normalizer_class = SCREAMING_SNAKE_CASE  # bridge to the name the lookup above was bound to
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
28
1
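The fast tokenizer above builds `[CLS] A [SEP]` for single sequences (or `[CLS] A [SEP] B [SEP]` for pairs) and lists only `input_ids` and `attention_mask` as model inputs. A short sketch of that behaviour, assuming the standard `distilbert-base-uncased` checkpoint referenced in the vocabulary map above:

from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")

# Pair encoding: special tokens come from build_inputs_with_special_tokens.
enc = tokenizer("hello world", "how are you")
print(tokenizer.decode(enc["input_ids"]))
# [CLS] hello world [SEP] how are you [SEP]
print(enc["attention_mask"])  # all ones, since no padding was requested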
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase_ = 1_2_8_0_2_2 UpperCamelCase_ = 1_2_8_0_2_8 @require_sentencepiece class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[str] = MaMaaaTokenizer A : Optional[int] = False A : Tuple = False A : Optional[Any] = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE : str = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] SCREAMING_SNAKE_CASE : List[str] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : List[str] = Path(self.tmpdirname ) save_json(A, save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(A, save_dir / VOCAB_FILES_NAMES['spm_file'] ) SCREAMING_SNAKE_CASE : Any = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return ( "This is a test", "This is a test", ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = '</s>' SCREAMING_SNAKE_CASE : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ), A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Tuple = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0], '</s>' ) self.assertEqual(vocab_keys[1], '<unk>' ) self.assertEqual(vocab_keys[-1], '<s>' ) self.assertEqual(len(A ), tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('Skip this test while all models are still to be uploaded.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ), [2, 3, 4, 5, 6], ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_string(A ) self.assertEqual(A, 'This is a test' ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {'input_ids': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e', ) @require_torch @require_sentencepiece @require_tokenizers class _a ( unittest.TestCase ): '''simple 
docstring''' A : Dict = '''facebook/m2m100_418M''' A : Dict = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] A : Optional[int] = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off A : Dict = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2] @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name, src_lang='en', tgt_lang='fr' ) SCREAMING_SNAKE_CASE : List[Any] = 1 return cls def UpperCamelCase_ ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('ar' ), 128_006 ) self.assertEqual(self.tokenizer.get_lang_id('en' ), 128_022 ) self.assertEqual(self.tokenizer.get_lang_id('ro' ), 128_076 ) self.assertEqual(self.tokenizer.get_lang_id('mr' ), 128_063 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.get_vocab() self.assertEqual(len(A ), self.tokenizer.vocab_size ) self.assertEqual(vocab['<unk>'], 3 ) self.assertIn(self.tokenizer.get_lang_token('en' ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 'en' SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.assertIn(A, self.tokenizer.all_special_ids ) # fmt: off SCREAMING_SNAKE_CASE : List[Any] = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(A, skip_special_tokens=A ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=A ) self.assertEqual(A, A ) self.assertNotIn(self.tokenizer.eos_token, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(A ) SCREAMING_SNAKE_CASE : Optional[int] = MaMaaaTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.lang_token_to_id, A ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 'en' SCREAMING_SNAKE_CASE : Tuple = 'fr' SCREAMING_SNAKE_CASE : Any = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right( batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id ) for k in batch: SCREAMING_SNAKE_CASE : Any = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = 'mr' self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens, 
[self.tokenizer.eos_token_id] ) SCREAMING_SNAKE_CASE : str = 'zh' self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = 'mr' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) SCREAMING_SNAKE_CASE : int = 'zh' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar' ) self.assertEqual( nested_simplify(A ), { # en_XX, A, test, EOS 'input_ids': [[128_022, 58, 4_183, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 128_006, }, )
28
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
28
1
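The M2M100 tokenizer tests above pin down the language-code handling (`get_lang_id`, prefix/suffix tokens, `forced_bos_token_id`). A minimal translation sketch using the same `facebook/m2m100_418M` checkpoint, assuming `torch` and `transformers` are installed:

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")

# Force the decoder to start with the target-language token, mirroring the
# forced_bos_token_id the tokenizer tests assert above.
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))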
'''simple docstring'''
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
28
'''simple docstring'''


class Node:
    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    # Recurse down the tree, appending values in sorted (in-order) sequence.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
28
1
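The `make_*` constructors above only derive biquad coefficients; the filtering itself is a second-order difference equation. A self-contained sketch of that step, assuming nothing beyond the standard library (`lowpass_coefficients` and `apply_biquad` are illustrative helpers, not part of the module above):

from math import cos, sin, sqrt, tau


def lowpass_coefficients(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    # Same biquad formulas as make_lowpass above, returned as plain lists.
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b0 = (1 - cos(w0)) / 2
    a_coeffs = [1 + alpha, -2 * cos(w0), 1 - alpha]
    b_coeffs = [b0, 1 - cos(w0), b0]
    return a_coeffs, b_coeffs


def apply_biquad(samples, a_coeffs, b_coeffs):
    # Direct-form I difference equation:
    # y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
    a0, a1, a2 = a_coeffs
    b0, b1, b2 = b_coeffs
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x in samples:
        y = (b0 * x + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x2, x1 = x1, x
        y2, y1 = y1, y
        out.append(y)
    return out


a_coeffs, b_coeffs = lowpass_coefficients(1_000, 48_000)
filtered = apply_biquad([0.0, 1.0, 0.0, 0.0, 0.0], a_coeffs, b_coeffs)  # impulse response
print(filtered)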
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
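Taken together, the slow tests spell out the three-stage DiffEdit workflow: derive a mask by contrasting two prompts, invert the image into latents with an inverse scheduler, then regenerate inside the mask toward the target prompt. A condensed usage sketch assembled from those tests; treat the exact arguments as illustrative rather than canonical:

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).resize((768, 768))

# Stage 1: contrast the source and target prompts to derive an edit mask.
mask = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
)
# Stage 2: invert the image into latents with the inverse scheduler.
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
# Stage 3: regenerate only inside the mask, steering toward the target prompt.
image = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7
).images[0]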
28
'''simple docstring'''

import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ):
    """simple docstring"""
    from .. import __version__

    SCREAMING_SNAKE_CASE : int = take_from
    SCREAMING_SNAKE_CASE : Optional[int] = ()
    if not isinstance(args[0] ,__UpperCamelCase ):
        SCREAMING_SNAKE_CASE : List[str] = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        SCREAMING_SNAKE_CASE : Tuple = None
        if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(__UpperCamelCase ),)
            SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(__UpperCamelCase ,__UpperCamelCase ):
            values += (getattr(__UpperCamelCase ,__UpperCamelCase ),)
            SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase )

    if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0:
        SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
        SCREAMING_SNAKE_CASE : Any = call_frame.filename
        SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno
        SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )

    if len(__UpperCamelCase ) == 0:
        return
    elif len(__UpperCamelCase ) == 1:
        return values[0]
    return values
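Call sites typically hand this helper a (name, removal_version, message) tuple plus the caller's kwargs dict to pop deprecated arguments from. A sketch using the upstream name `deprecate` for the obfuscated `lowercase__` above; the `take_from` keyword matches the diffusers helper, while `resize`, `size` and `scale` are hypothetical names of mine:

from diffusers.utils import deprecate  # upstream home of the helper sketched above


def resize(image, scale=None, **kwargs):
    """Resize wrapper that still accepts the retired `size` kwarg."""
    # Pop `size` out of kwargs if present, emitting a deprecation warning.
    size = deprecate(("size", "1.0.0", "`size` is deprecated, use `scale`."), take_from=kwargs)
    if size is not None:
        scale = size
    return image, scale


# Old call sites keep working but warn:
# resize(img, size=256)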
28
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["XLNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["XLNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification", "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", "TFXLNetForTokenClassification", "TFXLNetLMHeadModel", "TFXLNetMainLayer", "TFXLNetModel", "TFXLNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
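This file follows the standard transformers lazy-import pattern: `_import_structure` lists what each submodule exports, optional-backend checks gate what gets registered, and at runtime the module object is replaced by a `_LazyModule` that defers the real imports until attribute access. A stripped-down sketch of the pattern for a hypothetical `mymodel` package (all `mymodel` names are placeholders):

from typing import TYPE_CHECKING

from transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_mymodel": ["MyModelConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch absent: simply don't advertise the torch-backed classes
else:
    _import_structure["modeling_mymodel"] = ["MyModelModel"]

if TYPE_CHECKING:
    from .configuration_mymodel import MyModelConfig  # real imports for type checkers
else:
    import sys

    # At runtime the module is swapped for a _LazyModule, so heavy backends
    # are only imported the first time an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)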
28
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
1
'''simple docstring'''


def lowercase__( __UpperCamelCase: int ):
    """simple docstring"""
    if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
        raise TypeError('Input value must be an \'int\' type' )

    SCREAMING_SNAKE_CASE : int = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
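The function returns the 1-indexed position of the most significant set bit by right-shifting until the value reaches zero, which coincides with Python's built-in `int.bit_length()`. A quick check:

number = 104            # 0b1101000: the highest set bit is the 7th
position = 0
while number:
    position += 1
    number >>= 1
assert position == 7 == (104).bit_length()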
28
'''simple docstring'''


def lowercase__( __UpperCamelCase: int ):
    """simple docstring"""
    if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
        raise TypeError('Input value must be an \'int\' type' )

    SCREAMING_SNAKE_CASE : int = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
28
1
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker UpperCamelCase_ = "CompVis/stable-diffusion-v1-1" UpperCamelCase_ = "CompVis/stable-diffusion-v1-2" UpperCamelCase_ = "CompVis/stable-diffusion-v1-3" UpperCamelCase_ = "CompVis/stable-diffusion-v1-4" class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A, A, A, A, A, A, A, A = True, ): '''simple docstring''' super()._init_() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPipeline.from_pretrained(A ) SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionPipeline.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = StableDiffusionPipeline.from_pretrained(A ) SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline( vae=A, text_encoder=A, tokenizer=A, unet=A, scheduler=A, safety_checker=A, feature_extractor=A, requires_safety_checker=A, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return {k: getattr(self, A ) for k in self.config.keys() if not k.startswith('_' )} def UpperCamelCase_ ( self, A = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.enable_attention_slicing(A ) @torch.no_grad() def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ): '''simple docstring''' return self.pipea( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) @torch.no_grad() def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ): '''simple docstring''' return self.pipea( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) @torch.no_grad() def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ): '''simple docstring''' return self.pipea( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) @torch.no_grad() def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ): '''simple docstring''' return self.pipea( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, 
output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) @torch.no_grad() def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu' self.to(A ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." ) # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE : List[Any] = self.textaimg_sda_a( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a( prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
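End to end, one call to the final method runs the same prompt through all four v1.x checkpoints (after checking that height and width are divisible by 8) and returns the results together. A usage sketch; how `pipe` gets constructed, for example via a community-pipeline id, is left as an assumption:

# Sketch: `pipe` is assumed to be an instance of the comparison pipeline above.
output = pipe("an astronaut riding a horse", height=512, width=512, num_inference_steps=50)
images = output.images  # results from checkpoints v1.1, v1.2, v1.3 and v1.4, in order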
28
'''simple docstring''' from typing import Dict from .base import GenericTensor, Pipeline class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ): '''simple docstring''' if tokenize_kwargs is None: SCREAMING_SNAKE_CASE : Optional[int] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) SCREAMING_SNAKE_CASE : Tuple = truncation SCREAMING_SNAKE_CASE : int = tokenize_kwargs SCREAMING_SNAKE_CASE : Optional[Any] = {} if return_tensors is not None: SCREAMING_SNAKE_CASE : Optional[int] = return_tensors return preprocess_params, {}, postprocess_params def UpperCamelCase_ ( self, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.framework SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A ) return model_inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model(**A ) return model_outputs def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self, *A, **A ): '''simple docstring''' return super().__call__(*A, **A )
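This is the feature-extraction pipeline: it tokenizes the input, runs the bare model, and returns the first model output (the hidden states) as nested lists. A typical invocation through the `pipeline` factory; the model id is illustrative:

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is an NLP library")
# features[0] holds one hidden-state vector per token:
print(len(features[0]), len(features[0][0]))  # e.g. 9 768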
28
1
'''simple docstring'''

from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
UpperCamelCase_ = HfArgumentParser(InitializationArguments)
UpperCamelCase_ = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
UpperCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
UpperCamelCase_ = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
UpperCamelCase_ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
UpperCamelCase_ = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
28
'''simple docstring''' from __future__ import annotations import queue class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = data SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[str] = None def lowercase__( ): """simple docstring""" print('\n********Press N to stop entering at any point of time********\n' ) SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower() SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) ) q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: " SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = left_node q.put(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: " SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = right_node q.put(__UpperCamelCase ) raise def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return print(node.data ,end=',' ) pre_order(node.left ) pre_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return in_order(node.left ) print(node.data ,end=',' ) in_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data ,end=',' ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Optional[int] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Union[str, Any] = [] while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__UpperCamelCase ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[Any] = node while n or stack: while n: # start from root node, find its left child print(n.data ,end=',' ) stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE : List[Any] = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE : 
Any = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : int = node while n or stack: while n: stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = n.left SCREAMING_SNAKE_CASE : Tuple = stack.pop() print(n.data ,end=',' ) SCREAMING_SNAKE_CASE : str = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], [] SCREAMING_SNAKE_CASE : Optional[int] = node stacka.append(__UpperCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__UpperCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data ,end=',' ) def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ): """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 ) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) UpperCamelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
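A compact worked example makes the four traversal orders concrete; the five-node tree and its values below are mine:

class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)

# For this tree:
#        1
#       / \
#      2   3
#     / \
#    4   5
# pre-order   visits 1, 2, 4, 5, 3   (node, left, right)
# in-order    visits 4, 2, 5, 1, 3   (left, node, right)
# post-order  visits 4, 5, 2, 3, 1   (left, right, node)
# level-order visits 1, 2, 3, 4, 5   (breadth-first via a queue)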
28
1
'''simple docstring''' import datasets from .evaluate import evaluate UpperCamelCase_ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n" UpperCamelCase_ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n" UpperCamelCase_ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )}, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ), codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = {prediction['id']: prediction['prediction_text'] for prediction in predictions} SCREAMING_SNAKE_CASE : Any = [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] SCREAMING_SNAKE_CASE : str = evaluate(dataset=A, predictions=A ) return score
28
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
28
1
'''simple docstring'''

from __future__ import annotations


def lowercase__( __UpperCamelCase: float ,__UpperCamelCase: float ,__UpperCamelCase: float ):
    """simple docstring"""
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance < 0:
        raise ValueError('Resistance cannot be negative' )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
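Two worked cases, with the upstream name `ohms_law` standing in for the obfuscated `lowercase__` above; arguments are positional (voltage, current, resistance), and exactly one must be 0 so the function can solve V = I * R for it:

assert ohms_law(10, 0, 2) == {"current": 5.0}   # V = 10 V, R = 2 ohm  ->  I = V / R = 5 A
assert ohms_law(0, 2, 3) == {"voltage": 6.0}    # I = 2 A,  R = 3 ohm  ->  V = I * R = 6 V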
28
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A ) def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet( A, A, A, A, A, A, A, A, A, A, A, ) # merge samples if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample else: SCREAMING_SNAKE_CASE : str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(A, A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = save_directory for controlnet in self.nets: controlnet.save_pretrained( A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, ) idx += 1 SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}" @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path while os.path.isdir(A ): SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A ) controlnets.append(A ) idx += 1 SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}" logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." ) if len(A ) == 0: raise ValueError( F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(A )
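The forward pass above runs each ControlNet on its own conditioning image and scale, then merges the residuals by element-wise summation, with the first net initializing the accumulators. A sketch of that merge as a standalone function; the keyword names follow the usual ControlNetModel forward signature, and everything else is mine:

def merged_controlnet_residuals(nets, sample, timestep, encoder_hidden_states, images, scales):
    """Run each ControlNet and sum the residuals, mirroring the forward above."""
    down_block_res_samples, mid_block_res_sample = None, None
    for image, scale, controlnet in zip(images, scales, nets):
        down_samples, mid_sample = controlnet(
            sample, timestep, encoder_hidden_states,
            controlnet_cond=image, conditioning_scale=scale, return_dict=False,
        )
        if down_block_res_samples is None:
            # First net seeds the accumulators.
            down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
        else:
            # Later nets add their residuals element-wise.
            down_block_res_samples = [
                prev + curr for prev, curr in zip(down_block_res_samples, down_samples)
            ]
            mid_block_res_sample = mid_block_res_sample + mid_sample
    return down_block_res_samples, mid_block_res_sample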
28
1
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class _a : '''simple docstring''' def __init__( self, A, A=13, A=7, A=True, A=True, A=True, A=True, A=99, A=32, A=5, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=3, A=4, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : List[Any] = seq_length SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask SCREAMING_SNAKE_CASE : Dict = use_token_type_ids SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : List[str] = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : Any = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : int = type_vocab_size SCREAMING_SNAKE_CASE : int = type_sequence_label_size SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[Any] = num_labels SCREAMING_SNAKE_CASE : Tuple = num_choices SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : Tuple = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size], self.num_choices ) SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = NystromformerModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Tuple = model(A, attention_mask=A, token_type_ids=A ) SCREAMING_SNAKE_CASE : Any = model(A, token_type_ids=A ) SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = NystromformerForMaskedLM(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A, attention_mask=A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = NystromformerForQuestionAnswering(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : int = model( A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.num_labels SCREAMING_SNAKE_CASE : List[str] = NystromformerForSequenceClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[Any] = model(A, attention_mask=A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.num_labels SCREAMING_SNAKE_CASE : List[str] = NystromformerForTokenClassification(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[Any] = model(A, attention_mask=A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices SCREAMING_SNAKE_CASE : Tuple = NystromformerForMultipleChoice(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Dict = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() SCREAMING_SNAKE_CASE : str = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() SCREAMING_SNAKE_CASE : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() SCREAMING_SNAKE_CASE : Union[str, Any] = model( A, attention_mask=A, token_type_ids=A, labels=A, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): 
'''simple docstring''' A : str = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) A : Tuple = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = NystromformerModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self, config_class=A, hidden_size=37 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE : List[str] = type self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[int] = NystromformerModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class _a ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[int] = model(A )[0] SCREAMING_SNAKE_CASE : Any = torch.Size((1, 6, 768) ) self.assertEqual(output.shape, A ) SCREAMING_SNAKE_CASE : str = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = 'the [MASK] of Belgium is Brussels' SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' 
) SCREAMING_SNAKE_CASE : Dict = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' ) SCREAMING_SNAKE_CASE : Dict = tokenizer(A, return_tensors='pt' ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(encoding.input_ids ).logits SCREAMING_SNAKE_CASE : Optional[int] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(A ), 'capital' )
28
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
28
1
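As a sanity check on the feature extractor above: its `_np_extract_fbank_features` step maps an 80 dB log-mel dynamic range onto [-1, 1] by shifting, scaling, and clipping. The numpy sketch below reproduces just that arithmetic; the `normalize_db` name is mine, not part of the extractor.

import numpy as np

def normalize_db(log_spec: np.ndarray) -> np.ndarray:
    # Shift by -20 dB, scale by 1/40, clip to [-2, 0], then shift by +1:
    # the 80 dB range [-80, 0] lands exactly in [-1, 1].
    log_spec = log_spec - 20.0
    return np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0

print(normalize_db(np.array([-80.0, -60.0, -20.0, 0.0])))  # -> [-1., -1., 0., 0.5]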
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
28
'''simple docstring''' from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841 SCREAMING_SNAKE_CASE : Optional[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(__UpperCamelCase ) for nodea, nodea, cost in edges: adjacency[nodea].append([nodea, cost] ) adjacency[nodea].append([nodea, cost] ) SCREAMING_SNAKE_CASE : Dict = mst(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] ) SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] ) assert edge in result or reverse in result
28
1
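Because the identifiers in the Prim's-algorithm test above are placeholder-mangled, a self-contained restatement may help. `prim_mst` below is my own lazy-Prim stand-in for the imported `prisms_algorithm`, and the total of 37 is simply the weight sum of the eight expected MST edges listed in the test.

import heapq
from collections import defaultdict

edges = [(0, 1, 4), (0, 7, 8), (1, 2, 8), (7, 8, 7), (7, 6, 1), (2, 8, 2), (8, 6, 6),
         (2, 3, 7), (2, 5, 4), (6, 5, 2), (3, 5, 14), (3, 4, 9), (5, 4, 10), (1, 7, 11)]

# Undirected graph: insert each edge in both directions, as the test's two appends do.
adjacency = defaultdict(list)
for u, v, cost in edges:
    adjacency[u].append((v, cost))
    adjacency[v].append((u, cost))

def prim_mst(adj, start=0):
    """Lazy Prim: repeatedly take the cheapest edge that reaches a new vertex."""
    visited = {start}
    heap = [(cost, start, to) for to, cost in adj[start]]
    heapq.heapify(heap)
    tree = []
    while heap and len(visited) < len(adj):
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        tree.append((frm, to, cost))
        for nxt, c in adj[to]:
            if nxt not in visited:
                heapq.heappush(heap, (c, to, nxt))
    return tree

mst_edges = prim_mst(adjacency)
assert len(mst_edges) == 8                    # 9 vertices -> 8 tree edges
assert sum(c for _, _, c in mst_edges) == 37  # weight of the expected MST in the test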
'''simple docstring''' import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class _a ( nn.Module ): '''simple docstring''' def __init__( self ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = nn.Linear(3, 4 ) SCREAMING_SNAKE_CASE : Tuple = nn.BatchNormad(4 ) SCREAMING_SNAKE_CASE : Optional[int] = nn.Linear(4, 5 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(A ) ) ) class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(A, model.state_dict() ) SCREAMING_SNAKE_CASE : Tuple = os.path.join(A, 'index.json' ) self.assertTrue(os.path.isfile(A ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(A, F"{key}.dat" ) self.assertTrue(os.path.isfile(A ) ) # TODO: add tests on the fact weights are properly loaded def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: SCREAMING_SNAKE_CASE : int = torch.randn(2, 3, dtype=A ) with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE : str = offload_weight(A, 'weight', A, {} ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(A, 'weight.dat' ) self.assertTrue(os.path.isfile(A ) ) self.assertDictEqual(A, {'weight': {'shape': [2, 3], 'dtype': str(A ).split('.' 
)[1]}} ) SCREAMING_SNAKE_CASE : Any = load_offloaded_weight(A, index['weight'] ) self.assertTrue(torch.equal(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ModelForTest() SCREAMING_SNAKE_CASE : List[str] = model.state_dict() SCREAMING_SNAKE_CASE : str = {k: v for k, v in state_dict.items() if 'linear2' not in k} SCREAMING_SNAKE_CASE : Optional[Any] = {k: v for k, v in state_dict.items() if 'linear2' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(A, A ) SCREAMING_SNAKE_CASE : Any = OffloadedWeightsLoader(state_dict=A, save_folder=A ) # Every key is there with the right value self.assertEqual(sorted(A ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(A, weight_map[key] ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = {k: v for k, v in state_dict.items() if 'weight' in k} SCREAMING_SNAKE_CASE : int = {k: v for k, v in state_dict.items() if 'weight' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(A, A ) SCREAMING_SNAKE_CASE : List[str] = OffloadedWeightsLoader(state_dict=A, save_folder=A ) # Every key is there with the right value self.assertEqual(sorted(A ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(A, weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(A, A ) # Duplicates are removed SCREAMING_SNAKE_CASE : List[str] = OffloadedWeightsLoader(state_dict=A, save_folder=A ) # Every key is there with the right value self.assertEqual(sorted(A ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(A, weight_map[key] ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {'a.1': 0, 'a.10': 1, 'a.2': 2} SCREAMING_SNAKE_CASE : Dict = extract_submodules_state_dict(A, ['a.1', 'a.2'] ) self.assertDictEqual(A, {'a.1': 0, 'a.2': 2} ) SCREAMING_SNAKE_CASE : Tuple = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2} SCREAMING_SNAKE_CASE : str = extract_submodules_state_dict(A, ['a.1', 'a.2'] ) self.assertDictEqual(A, {'a.1.a': 0, 'a.2.a': 2} )
28
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
28
1
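For context on the offloading API those tests exercise, here is a minimal round-trip sketch using only calls that appear above (`offload_state_dict`, `load_offloaded_weight`). The toy `nn.Linear` module is mine, and I am assuming `index.json` holds the same per-tensor `{shape, dtype}` entries the tests assert for `offload_weight`.

import json
import os
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn
from accelerate.utils import load_offloaded_weight, offload_state_dict

model = nn.Linear(3, 4)          # toy module; any state dict works
state_dict = model.state_dict()  # keys: "weight", "bias"

with TemporaryDirectory() as tmp_dir:
    # Writes one .dat file per tensor plus an index.json describing them.
    offload_state_dict(tmp_dir, state_dict)
    with open(os.path.join(tmp_dir, "index.json")) as f:
        index = json.load(f)
    # Reload a single tensor from disk using its index entry.
    weight = load_offloaded_weight(os.path.join(tmp_dir, "weight.dat"), index["weight"])
    assert torch.equal(weight, state_dict["weight"])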
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "spiece.model"} UpperCamelCase_ = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } UpperCamelCase_ = { "AI-Sweden/gpt-sw3-126m": 2_0_4_8, "AI-Sweden/gpt-sw3-350m": 2_0_4_8, "AI-Sweden/gpt-sw3-1.6b": 2_0_4_8, "AI-Sweden/gpt-sw3-6.7b": 2_0_4_8, "AI-Sweden/gpt-sw3-20b": 2_0_4_8, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : int = VOCAB_FILES_NAMES A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self, A, A=False, A=False, A=False, A=None, A=None, A=None, A=None, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) SCREAMING_SNAKE_CASE : Dict = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing SCREAMING_SNAKE_CASE : Optional[Any] = '<|endoftext|>' if eos_token is None else eos_token SCREAMING_SNAKE_CASE : int = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: SCREAMING_SNAKE_CASE : Dict = unk_token if pad_token is None else pad_token SCREAMING_SNAKE_CASE : List[Any] = eos_token if bos_token is None else bos_token else: SCREAMING_SNAKE_CASE : Optional[int] = '<pad>' if pad_token is None else pad_token SCREAMING_SNAKE_CASE : int = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=A, remove_space=A, keep_accents=A, bos_token=A, eos_token=A, unk_token=A, pad_token=A, sp_model_kwargs=self.sp_model_kwargs, **A, ) SCREAMING_SNAKE_CASE : Tuple = do_lower_case SCREAMING_SNAKE_CASE : Optional[Any] = remove_space SCREAMING_SNAKE_CASE : List[Any] = keep_accents SCREAMING_SNAKE_CASE : Tuple = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) # Used for whitespace normalization in input texts # fmt : off SCREAMING_SNAKE_CASE : Any = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing SCREAMING_SNAKE_CASE : str = re.compile( F"[{''.join(map(A, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(127, 160 ) ) + [160, 173, 8_203] ) )}]" ) def __getstate__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.__dict__.copy() SCREAMING_SNAKE_CASE : Optional[int] = None return state def __setstate__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase_ ( self ): '''simple docstring''' return len(self.sp_model ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.non_printing_characters_re.sub('', A ) # Normalize whitespaces SCREAMING_SNAKE_CASE : Any = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization SCREAMING_SNAKE_CASE : List[str] = unicodedata.normalize('NFC', A ) return text def UpperCamelCase_ ( self, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.preprocess_text(A ) return self.sp_model.encode(A, out_type=A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.IdToPiece(A ) @staticmethod def UpperCamelCase_ ( A ): '''simple docstring''' return out_string def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : str = '' SCREAMING_SNAKE_CASE : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : List[str] = [] else: current_sub_tokens.append(A ) SCREAMING_SNAKE_CASE : int = False out_string += self.sp_model.decode(A ) return out_string def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' if not os.path.isdir(A ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE : Any = os.path.join( A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, A ) elif not os.path.isfile(self.vocab_file ): with open(A, 'wb' ) as fi: SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self, A, A = False ): '''simple docstring''' if isinstance(A, A ): SCREAMING_SNAKE_CASE : Optional[int] = self.preprocess_text(A ) SCREAMING_SNAKE_CASE : Tuple = self.sp_model.encode(A ) else: SCREAMING_SNAKE_CASE : List[Any] = [self.preprocess_text(A ) for t in text] SCREAMING_SNAKE_CASE : Optional[Any] = 
self.sp_model.encode(A ) if return_tensors is True or return_tensors == "pt": SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(A ) return token_ids def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()] SCREAMING_SNAKE_CASE : Any = ( F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(A ) + F"{self.bos_token}Bot:" ) return self.encode(text=A )
28
'''simple docstring''' def lowercase__( limit: int = 1_00_00_00 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )] for i in range(2 ,limit + 1 ): if phi[i] == i - 1: for j in range(2 * i ,limit + 1 ,i ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
28
1
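The totient sieve above, restated with readable names: `phi[n]` starts at `n - 1`, and whenever `phi[i]` is still `i - 1`, `i` must be prime, so every multiple `j` of `i` loses `phi[j] // i`. The spot check uses the worked example from Project Euler problem 72 (21 reduced proper fractions for denominators up to 8).

def totient_sum(limit: int) -> int:
    phi = list(range(-1, limit))          # phi[i] starts at i - 1
    for i in range(2, limit + 1):
        if phi[i] == i - 1:               # untouched so far => i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i     # apply the (1 - 1/i) factor
    return sum(phi[2 : limit + 1])

assert totient_sum(8) == 21  # Project Euler 72: 21 fractions for d <= 8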
'''simple docstring''' import csv import tweepy # Twitter API credentials UpperCamelCase_ = "" UpperCamelCase_ = "" UpperCamelCase_ = "" UpperCamelCase_ = "" def lowercase__( __UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tweepy.OAuthHandler(__UpperCamelCase ,__UpperCamelCase ) auth.set_access_token(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = tweepy.API(__UpperCamelCase ) # initialize a list to hold all the tweepy Tweets SCREAMING_SNAKE_CASE : Optional[int] = [] # make initial request for most recent tweets (200 is the maximum allowed count) SCREAMING_SNAKE_CASE : str = api.user_timeline(screen_name=__UpperCamelCase ,count=2_00 ) # save most recent tweets alltweets.extend(__UpperCamelCase ) # save the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Optional[Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__UpperCamelCase ) > 0: print(f"getting tweets before {oldest}" ) # all subsequent requests use the max_id param to prevent duplicates SCREAMING_SNAKE_CASE : str = api.user_timeline( screen_name=__UpperCamelCase ,count=2_00 ,max_id=__UpperCamelCase ) # save most recent tweets alltweets.extend(__UpperCamelCase ) # update the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Optional[Any] = alltweets[-1].id - 1 print(f"...{len(__UpperCamelCase )} tweets downloaded so far" ) # transform the tweepy tweets into a 2D array that will populate the csv SCREAMING_SNAKE_CASE : Tuple = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"new_{screen_name}_tweets.csv" ,'w' ) as f: SCREAMING_SNAKE_CASE : Optional[int] = csv.writer(__UpperCamelCase ) writer.writerow(['id', 'created_at', 'text'] ) writer.writerows(__UpperCamelCase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("FirePing32")
28
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
28
1
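The download loop in the tweet-dumper above is keyset pagination: fetch up to 200 tweets, set `max_id` to the oldest id seen minus one, and repeat until a request comes back empty. A tiny offline model of that loop follows; `fake_timeline` is a made-up stand-in for `api.user_timeline`, not a tweepy API.

def fake_timeline(ids, count=200, max_id=None):
    # Stand-in for api.user_timeline: newest-first, at most `count`,
    # honoring the max_id cutoff.
    eligible = [i for i in ids if max_id is None or i <= max_id]
    return sorted(eligible, reverse=True)[:count]

all_ids = list(range(1, 501))      # pretend the account has 500 tweets
fetched, max_id = [], None
while True:
    batch = fake_timeline(all_ids, max_id=max_id)
    if not batch:
        break
    fetched.extend(batch)
    max_id = batch[-1] - 1         # oldest id in the batch, minus one
assert sorted(fetched) == all_ids  # every tweet fetched exactly once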
'''simple docstring''' import itertools import math def lowercase__( __UpperCamelCase: int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = 2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def lowercase__( __UpperCamelCase: int = 1_00_01 ): """simple docstring""" return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) ) if __name__ == "__main__": print(F"""{solution() = }""")
28
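A readable restatement of the 6k +/- 1 prime generator directly above, with the Project Euler problem 7 spot check it implements (the 10001st prime is 104743):

import itertools
import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # All remaining primes have the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1

# nth prime via islice: index 10000 is the 10001st element.
assert next(itertools.islice(prime_generator(), 10000, None)) == 104743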
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = StableDiffusionXLImgaImgPipeline A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} A : str = PipelineTesterMixin.required_optional_params - {'''latents'''} A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, ) SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler( beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, ) SCREAMING_SNAKE_CASE : int = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A ) SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 
'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) # forward without prompt embeds SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']] SCREAMING_SNAKE_CASE : int = sd_pipe(**A ) SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )] ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe( **A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, ) SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A ) SCREAMING_SNAKE_CASE : str = pipe(**A ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
28
1
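# A minimal sketch of the seeded-generator pattern the img2img test above
# relies on: MPS devices cannot seed a device-local torch.Generator, so the
# dummy-input helper branches on the device string. `make_generator` is a
# hypothetical helper name, not part of diffusers.
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        # Seeding a Generator on MPS is unsupported; fall back to the
        # (CPU) default generator, which torch.manual_seed returns.
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


# The same seed then reproduces the same latents across test runs:
noise = torch.randn((1, 4, 32, 32), generator=make_generator("cpu", 0))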
'''simple docstring''' import os import sys import unittest UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCamelCase_ = os.path.join(git_repo_path, "src", "transformers") UpperCamelCase_ = "\n{0} = None\n" UpperCamelCase_ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" UpperCamelCase_ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' ) self.assertIsNone(A ) SCREAMING_SNAKE_CASE : List[str] = find_backend(' if not is_tokenizers_available():' ) self.assertEqual(A, 'tokenizers' ) SCREAMING_SNAKE_CASE : Optional[int] = find_backend(' if not is_tensorflow_text_available():' ) self.assertEqual(A, 'tensorflow_text' ) SCREAMING_SNAKE_CASE : Union[str, Any] = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' ) self.assertEqual(A, 'sentencepiece_and_tokenizers' ) SCREAMING_SNAKE_CASE : Dict = find_backend( ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' ) self.assertEqual(A, 'sentencepiece_and_tensorflow_text' ) SCREAMING_SNAKE_CASE : List[Any] = find_backend( ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' ) self.assertEqual(A, 'sentencepiece_and_tokenizers_and_vision' ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch', A ) self.assertIn('tensorflow_text', A ) self.assertIn('sentencepiece_and_tokenizers', A ) # Likewise, we can't assert on the exact content of a key self.assertIn('BertModel', objects['torch'] ) self.assertIn('TFBertModel', objects['tf'] ) self.assertIn('FlaxBertModel', objects['flax'] ) self.assertIn('BertModel', objects['torch'] ) self.assertIn('TFBertTokenizer', objects['tensorflow_text'] ) self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('CONSTANT', '\'torch\'' ) self.assertEqual(A, '\nCONSTANT = None\n' ) SCREAMING_SNAKE_CASE : str = create_dummy_object('function', '\'torch\'' ) self.assertEqual( A, '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) SCREAMING_SNAKE_CASE : str = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n' SCREAMING_SNAKE_CASE : Optional[int] = create_dummy_object('FakeClass', '\'torch\'' ) self.assertEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = 
["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n' SCREAMING_SNAKE_CASE : Dict = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'], A )
28
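# A hedged sketch of what the `find_backend` helper exercised by the test
# above could look like; the real implementation lives in transformers'
# check_dummies utility, and this regex-based version is only an illustrative
# approximation that satisfies the same assertions.
import re

_BACKEND_RE = re.compile(r"is_(\w+)_available\(\)")


def find_backend(line: str):
    # Return e.g. "tokenizers" or "sentencepiece_and_tokenizers" for an
    # `if not is_x_available():` guard line, and None for anything else.
    if "if not" not in line or "_available" not in line:
        return None
    backends = _BACKEND_RE.findall(line)
    return "_and_".join(backends) if backends else None


assert find_backend('    _import_structure["models.albert"]') is None
assert find_backend("    if not is_tokenizers_available():") == "tokenizers"
assert (
    find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
    == "sentencepiece_and_tokenizers"
)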
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
28
1
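# The MGP-STR decode helper above scores each decoded string with the product
# of its per-step maximum token probabilities (via cumprod); a standalone
# sketch of that scoring on random logits:
import torch

logits = torch.randn(1, 5, 30)                      # (batch, seq_len, vocab)
probs, ids = torch.softmax(logits, dim=2).max(dim=2)
confidence = probs[0].cumprod(dim=0)[-1]            # same as probs[0].prod()
print(ids[0].tolist(), float(confidence))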
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = MvpTokenizer A : Union[str, Any] = MvpTokenizerFast A : Tuple = True A : List[Any] = filter_roberta_detectors def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE : Optional[Any] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Optional[Any] = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MvpTokenizer.from_pretrained('RUCAIBox/mvp' ) @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] SCREAMING_SNAKE_CASE : List[str] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(A, max_length=len(A ), padding=A, return_tensors='pt' ) self.assertIsInstance(A, A ) self.assertEqual((2, 9), batch.input_ids.shape ) self.assertEqual((2, 9), batch.attention_mask.shape ) SCREAMING_SNAKE_CASE : Dict = batch.input_ids.tolist()[0] self.assertListEqual(A, A ) # Test that special tokens are reset @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE : Dict = tokenizer(A, padding=A, return_tensors='pt' ) # check if input_ids are returned and no labels self.assertIn('input_ids', A ) self.assertIn('attention_mask', A ) self.assertNotIn('labels', A ) self.assertNotIn('decoder_attention_mask', A ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [ 'Summary of the text.', 
'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE : int = tokenizer(text_target=A, max_length=32, padding='max_length', return_tensors='pt' ) self.assertEqual(32, targets['input_ids'].shape[1] ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE : Tuple = tokenizer( ['I am a small frog' * 1_024, 'I am a small frog'], padding=A, truncation=A, return_tensors='pt' ) self.assertIsInstance(A, A ) self.assertEqual(batch.input_ids.shape, (2, 1_024) ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = ['A long paragraph for summarization.'] SCREAMING_SNAKE_CASE : Dict = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE : Tuple = tokenizer(A, text_target=A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Optional[Any] = inputs['input_ids'] SCREAMING_SNAKE_CASE : List[Any] = inputs['labels'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : List[Any] = 'A, <mask> AllenNLP sentence.' SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : int = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
28
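# A hedged usage sketch of the text_target API the seq2seq-tokenizer tests
# above exercise; it needs network access to download the MVP tokenizer, and
# any BART-style tokenizer behaves the same way.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
batch = tok(
    ["A long paragraph for summarization."],
    text_target=["Summary of the text."],
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)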
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("transformers.models.speecht5") def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ): """simple docstring""" hf_model.apply_weight_norm() SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v'] SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"] SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v'] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig() SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase ) load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float() SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float() model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) UpperCamelCase_ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
28
1
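# The conversion script above wraps the state-dict copy in apply_weight_norm()
# / remove_weight_norm() because the original HiFi-GAN checkpoint stores each
# conv weight as the weight_g/weight_v pair that weight normalization
# introduces. A minimal sketch of that reparameterization on a bare layer:
import torch
from torch import nn

conv = nn.Conv1d(80, 512, kernel_size=7, padding=3)
conv = nn.utils.weight_norm(conv)

# 'weight' is now derived from the magnitude/direction parameter pair:
print({name: tuple(p.shape) for name, p in conv.named_parameters()})
# e.g. {'bias': (512,), 'weight_g': (512, 1, 1), 'weight_v': (512, 80, 7)}

nn.utils.remove_weight_norm(conv)  # folds the pair back into a plain 'weight'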
'''simple docstring''' from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING UpperCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE ) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, **A ): '''simple docstring''' super().__init__(**A ) requires_backends(self, 'vision' ) requires_backends(self, 'torch' ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = {} SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Tuple = {} # preprocess args if "points_per_batch" in kwargs: SCREAMING_SNAKE_CASE : List[Any] = kwargs['points_per_batch'] if "points_per_crop" in kwargs: SCREAMING_SNAKE_CASE : str = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: SCREAMING_SNAKE_CASE : Any = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: SCREAMING_SNAKE_CASE : Dict = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: SCREAMING_SNAKE_CASE : Tuple = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: SCREAMING_SNAKE_CASE : Optional[Any] = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: SCREAMING_SNAKE_CASE : Tuple = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: SCREAMING_SNAKE_CASE : Dict = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: SCREAMING_SNAKE_CASE : Optional[int] = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: SCREAMING_SNAKE_CASE : int = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: SCREAMING_SNAKE_CASE : List[str] = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: SCREAMING_SNAKE_CASE : Dict = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self, A, *A, A=None, A=None, **A ): '''simple docstring''' return super().__call__(A, *A, num_workers=A, batch_size=A, **A ) def UpperCamelCase_ ( self, A, A=64, A = 0, A = 512 / 1_500, A = 32, A = 1, ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = load_image(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.size['longest_edge'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.image_processor.generate_crop_boxes( A, A, A, A, A, A ) SCREAMING_SNAKE_CASE : Any = self.image_processor(images=A, return_tensors='pt' ) with self.device_placement(): if self.framework == "pt": SCREAMING_SNAKE_CASE : int = self.get_inference_context() with inference_context(): SCREAMING_SNAKE_CASE : Optional[int] = self._ensure_tensor_on_device(A, device=self.device ) SCREAMING_SNAKE_CASE : int = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) ) SCREAMING_SNAKE_CASE : str = image_embeddings SCREAMING_SNAKE_CASE : List[str] = grid_points.shape[1] SCREAMING_SNAKE_CASE : Dict = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
' 'To return all points at once, set points_per_batch to None' ) for i in range(0, A, A ): SCREAMING_SNAKE_CASE : Optional[int] = grid_points[:, i : i + points_per_batch, :, :] SCREAMING_SNAKE_CASE : Union[str, Any] = input_labels[:, i : i + points_per_batch] SCREAMING_SNAKE_CASE : str = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def UpperCamelCase_ ( self, A, A=0.88, A=0.95, A=0, A=1, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs.pop('input_boxes' ) SCREAMING_SNAKE_CASE : Any = model_inputs.pop('is_last' ) SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs.pop('original_sizes' ).tolist() SCREAMING_SNAKE_CASE : Dict = model_inputs.pop('reshaped_input_sizes' ).tolist() SCREAMING_SNAKE_CASE : List[str] = self.model(**A ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks SCREAMING_SNAKE_CASE : List[str] = model_outputs['pred_masks'] SCREAMING_SNAKE_CASE : Dict = self.image_processor.post_process_masks( A, A, A, A, binarize=A ) SCREAMING_SNAKE_CASE : List[str] = model_outputs['iou_scores'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor.filter_masks( masks[0], iou_scores[0], original_sizes[0], input_boxes[0], A, A, A, A, ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def UpperCamelCase_ ( self, A, A=False, A=False, A=0.7, ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Dict = [] SCREAMING_SNAKE_CASE : List[Any] = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores' ) ) all_masks.extend(model_output.pop('masks' ) ) all_boxes.append(model_output.pop('boxes' ) ) SCREAMING_SNAKE_CASE : str = torch.cat(A ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.post_process_for_mask_generation( A, A, A, A ) SCREAMING_SNAKE_CASE : Optional[Any] = defaultdict(A ) for output in model_outputs: for k, v in output.items(): extra[k].append(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = {} if output_rle_mask: SCREAMING_SNAKE_CASE : Optional[Any] = rle_mask if output_bboxes_mask: SCREAMING_SNAKE_CASE : str = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
28
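# The mask-generation pipeline above streams model inputs in chunks of
# `points_per_batch` prompt points; a minimal sketch of that chunking, with a
# plain tensor standing in for the real grid points. Note the source marks
# is_last with an equality test, which assumes n_points divides evenly; the
# >= form below also handles a ragged final chunk.
import torch


def chunk_points(grid_points: torch.Tensor, points_per_batch: int):
    # grid_points: (batch, n_points, 1, 2), the layout the image processor emits
    n_points = grid_points.shape[1]
    for i in range(0, n_points, points_per_batch):
        batched = grid_points[:, i : i + points_per_batch, :, :]
        yield batched, i + points_per_batch >= n_points


points = torch.rand(1, 10, 1, 2)
for chunk, is_last in chunk_points(points, 4):
    print(tuple(chunk.shape), is_last)  # (1,4,1,2) False ... (1,2,1,2) True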
'''simple docstring''' from typing import Any class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = data SCREAMING_SNAKE_CASE : Any = None def __repr__( self ): '''simple docstring''' return F"Node({self.data})" class _a : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None def __iter__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.head while node: yield node.data SCREAMING_SNAKE_CASE : List[str] = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(A ) for item in self] ) def __getitem__( self, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, A, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(A ): SCREAMING_SNAKE_CASE : Union[str, Any] = current.next SCREAMING_SNAKE_CASE : Any = data def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(len(self ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(0, A ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A ) if self.head is None: SCREAMING_SNAKE_CASE : Optional[int] = new_node elif index == 0: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head SCREAMING_SNAKE_CASE : Tuple = new_node else: SCREAMING_SNAKE_CASE : Optional[int] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : str = temp.next SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next SCREAMING_SNAKE_CASE : List[str] = new_node def UpperCamelCase_ ( self ): # print every node data '''simple docstring''' print(self ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCamelCase_ ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCamelCase_ ( self, A = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node if index == 0: SCREAMING_SNAKE_CASE : List[str] = self.head.next else: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Any = temp.next SCREAMING_SNAKE_CASE : List[str] = temp.next SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next return delete_node.data def UpperCamelCase_ ( self ): '''simple docstring''' return self.head is None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Any = self.head while current: # Store the current node's next node. 
SCREAMING_SNAKE_CASE : Optional[int] = current.next # Make the current node's next point backwards SCREAMING_SNAKE_CASE : int = prev # Make the previous node be the current node SCREAMING_SNAKE_CASE : int = current # Make the current node the next node (to progress iteration) SCREAMING_SNAKE_CASE : List[Any] = next_node # Return prev in order to put the head at the end SCREAMING_SNAKE_CASE : List[Any] = prev def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = LinkedList() assert linked_list.is_empty() is True assert str(__UpperCamelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(__UpperCamelCase ) == i linked_list.insert_nth(__UpperCamelCase ,i + 1 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__UpperCamelCase ) == 9 assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True for i in range(0 ,9 ): SCREAMING_SNAKE_CASE : Any = -i assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True linked_list.reverse() assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_9_2.5_5_5_5_5, 'Hello, world!', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] SCREAMING_SNAKE_CASE : Optional[int] = LinkedList() for i in test_input: linked_list.insert_tail(__UpperCamelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head SCREAMING_SNAKE_CASE : str = linked_list.delete_head() assert result == -9 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 ) assert result is None assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__UpperCamelCase ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__UpperCamelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__( ): """simple docstring""" from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Dict = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(__UpperCamelCase ) print('\nReading/changing Node data using indexing:' ) print(f"Element at Position 1: {linked_list[1]}" ) SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip() print('New list:' ) print(__UpperCamelCase ) print(f"length of linked_list is : {len(__UpperCamelCase )}" ) if __name__ == "__main__": main()
28
1
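# A de-obfuscated sketch of the iterative reversal in the linked-list file
# above: walk the list once, pointing each node back at its predecessor.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # flip the pointer backwards
        prev = current            # advance prev ...
        current = next_node       # ... and current
    return prev                   # the old tail is the new head


# build 1 -> 2 -> 3, reverse it, print 3 2 1
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
node = reverse(head)
while node:
    print(node.data)
    node = node.next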
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP UpperCamelCase_ = False try: UpperCamelCase_ = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class _a : '''simple docstring''' def __init__( self, A = None, A = [] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : int = choices SCREAMING_SNAKE_CASE : Optional[Any] = prompt if sys.platform == "win32": SCREAMING_SNAKE_CASE : Dict = '*' else: SCREAMING_SNAKE_CASE : List[str] = '➔ ' def UpperCamelCase_ ( self, A, A = "" ): '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index], 32, A ) else: forceWrite(self.choices[index], A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if index == self.position: forceWrite(F" {self.arrow_char} " ) self.write_choice(A ) else: forceWrite(F" {self.choices[index]}" ) reset_cursor() def UpperCamelCase_ ( self, A, A = 1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(A ) move_cursor(A, direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP['up'] ) def UpperCamelCase_ ( self ): '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP['down'] ) def UpperCamelCase_ ( self ): '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP['newline'] ) def UpperCamelCase_ ( self ): '''simple docstring''' move_cursor(len(self.choices ) - self.position, 'DOWN' ) return self.position @input.mark(KEYMAP['interrupt'] ) def UpperCamelCase_ ( self ): '''simple docstring''' move_cursor(len(self.choices ) - self.position, 'DOWN' ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(A )] for number in range(10 )] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = int(chr(self.current_selection ) ) SCREAMING_SNAKE_CASE : Dict = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP, -movement ) elif self.position < index: self.move_direction(Direction.DOWN, A ) else: return else: return def UpperCamelCase_ ( self, A = 0 ): '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt, '\n' ) if in_colab: forceWrite('Please input a choice index (starting from 0), and press enter', '\n' ) else: forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n' ) SCREAMING_SNAKE_CASE : Tuple = default_choice for i in range(len(self.choices ) ): self.print_choice(A ) forceWrite('\n' ) move_cursor(len(self.choices ) - self.position, 'UP' ) with cursor.hide(): while True: if in_colab: try: SCREAMING_SNAKE_CASE : List[str] = int(builtins.input() ) except ValueError: SCREAMING_SNAKE_CASE : Optional[int] = default_choice else: SCREAMING_SNAKE_CASE : Any = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1, 'UP' ) clear_line() self.write_choice(A, '\n' ) return choice
28
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
28
1
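# The image-processor test above recomputes the expected output size from the
# shortest-edge rule; a standalone sketch of that aspect-ratio-preserving
# computation (the longest_edge clamp is omitted, as in the helper above).
def expected_size(h: int, w: int, shortest_edge: int = 18) -> tuple:
    # returns (expected_height, expected_width)
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge  # square input


assert expected_size(400, 200) == (36, 18)
assert expected_size(200, 400) == (18, 36)
assert expected_size(300, 300) == (18, 18)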
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


UpperCamelCase_ = get_tests_dir("fixtures")


class _a ( unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = mock.Mock()
        SCREAMING_SNAKE_CASE : Tuple = 500
        SCREAMING_SNAKE_CASE : int = {}
        SCREAMING_SNAKE_CASE : Union[str, Any] = HTTPError
        SCREAMING_SNAKE_CASE : Tuple = {}

        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=A ) as mock_head:
            SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )


@is_staging_test
class _a ( unittest.TestCase ):
    '''simple docstring'''

    @classmethod
    def UpperCamelCase_ ( cls ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = TOKEN
        HfFolder.save_token(A )

    @classmethod
    def UpperCamelCase_ ( cls ):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token, repo_id='test-feature-extractor' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor' )
        except HTTPError:
            pass

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(A )
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                A, repo_id='test-feature-extractor', push_to_hub=A, use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Dict = WavaVecaFeatureExtractor.from_pretrained(A )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                A, repo_id='valid_org/test-feature-extractor-org', push_to_hub=A, use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : str = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        CustomFeatureExtractor.register_for_auto_class()
        SCREAMING_SNAKE_CASE : str = CustomFeatureExtractor.from_pretrained(A )

        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},
        )

        SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained(
            F"{USER}/test-dynamic-feature-extractor", trust_remote_code=A )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor' )
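# Hedged sketch (not part of the dataset row above) of the flow these staging
# tests exercise, using the public transformers / huggingface_hub names the
# obfuscation hides; `Wav2Vec2FeatureExtractor` as the de-obfuscated class is
# an assumption, and the snippet needs a saved HF token to actually run:
#
#     from transformers import Wav2Vec2FeatureExtractor
#
#     fe = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
#     fe.push_to_hub("my-test-feature-extractor")
#     roundtrip = Wav2Vec2FeatureExtractor.from_pretrained("<user>/my-test-feature-extractor")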
28
'''simple docstring'''
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
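# Hedged usage sketch (not part of the dataset row above): the de-obfuscated
# originals of the two helpers are the public `datasets` entry points
# interleave_datasets / concatenate_datasets, used roughly like this.
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted")
print(mixed["a"])  # rows drawn alternately until both sources are exhausted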
28
1
'''simple docstring'''
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''

    A : Any = ReformerTokenizer
    A : Any = ReformerTokenizerFast
    A : Any = True
    A : Tuple = False
    A : Optional[Any] = True

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        super().setUp()

        SCREAMING_SNAKE_CASE : Optional[Any] = ReformerTokenizer(A, keep_accents=A )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = '<s>'
        SCREAMING_SNAKE_CASE : Dict = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ), A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ), A )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], '<unk>' )
        self.assertEqual(vocab_keys[1], '<s>' )
        self.assertEqual(vocab_keys[-1], 'j' )
        self.assertEqual(len(A ), 1_000 )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000 )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()

        SCREAMING_SNAKE_CASE : Tuple = 'I was born in 92000, and this is falsé.'

        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(A )
        SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.tokenize(A )
        self.assertListEqual(A, A )

        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(A, add_special_tokens=A )
        SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(A, add_special_tokens=A )
        self.assertListEqual(A, A )

        SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(A )
        SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.encode(A )
        self.assertListEqual(A, A )

    def UpperCamelCase_ ( self, A=15 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(A, **A )

                # Simple input
                SCREAMING_SNAKE_CASE : Tuple = 'This is a simple input'
                SCREAMING_SNAKE_CASE : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
                SCREAMING_SNAKE_CASE : int = ('This is a simple input', 'This is a pair')
                SCREAMING_SNAKE_CASE : List[Any] = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]

                # Simple input tests
                self.assertRaises(A, tokenizer_r.encode, A, max_length=A, padding='max_length' )

                # Simple input
                self.assertRaises(A, tokenizer_r.encode_plus, A, max_length=A, padding='max_length' )

                # Simple input
                self.assertRaises(
                    A, tokenizer_r.batch_encode_plus, A, max_length=A, padding='max_length', )

                # Pair input
                self.assertRaises(A, tokenizer_r.encode, A, max_length=A, padding='max_length' )

                # Pair input
                self.assertRaises(A, tokenizer_r.encode_plus, A, max_length=A, padding='max_length' )

                # Pair input
                self.assertRaises(
                    A, tokenizer_r.batch_encode_plus, A, max_length=A, padding='max_length', )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        pass

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = ReformerTokenizer(A, keep_accents=A )

        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize('This is a test' )
        self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(A ),
            [285, 46, 10, 170, 382],
        )

        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            A,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(A )
        self.assertListEqual(
            A,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(A )
        self.assertListEqual(
            A,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )

    @cached_property
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Dict = 'Hello World!'
        SCREAMING_SNAKE_CASE : Optional[int] = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(A, self.big_tokenizer.encode(A ) )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        SCREAMING_SNAKE_CASE : List[Any] = [108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265]

        self.assertListEqual(A, self.big_tokenizer.encode(A ) )

    @require_torch
    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        SCREAMING_SNAKE_CASE : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:10]
        SCREAMING_SNAKE_CASE : Union[str, Any] = ' '.join(A )
        SCREAMING_SNAKE_CASE : Dict = self.big_tokenizer.encode_plus(A, return_tensors='pt' )
        SCREAMING_SNAKE_CASE : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt' )

        SCREAMING_SNAKE_CASE : str = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        SCREAMING_SNAKE_CASE : List[Any] = encoded_sequence['input_ids'].shape
        SCREAMING_SNAKE_CASE : List[str] = ReformerModel(A )

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**A )
            model(**A )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # fmt: off
        SCREAMING_SNAKE_CASE : List[str] = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        SCREAMING_SNAKE_CASE : Dict = [
            'This is a very simple sentence.',
            'The quick brown fox jumps over the lazy dog.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=A,
            model_name='google/reformer-crime-and-punishment',
            revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a',
            padding=A,
            sequences=A,
        )
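# Hedged usage sketch (not part of the dataset row above): the slow tests pin
# the SentencePiece vocabulary of the public checkpoint, so the round-trip they
# guard looks roughly like this (needs the checkpoint download to run).
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
assert tok.encode("Hello World!") == [126, 32, 262, 152, 38, 72, 287]  # expected ids from the test above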
28
'''simple docstring'''
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class _a ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
        self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )


class _a :
    '''simple docstring'''

    def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[Any] = parent
        SCREAMING_SNAKE_CASE : int = batch_size
        SCREAMING_SNAKE_CASE : int = image_size
        SCREAMING_SNAKE_CASE : str = patch_size
        SCREAMING_SNAKE_CASE : Tuple = num_channels
        SCREAMING_SNAKE_CASE : int = last_hidden_size
        SCREAMING_SNAKE_CASE : Any = num_attention_heads
        SCREAMING_SNAKE_CASE : List[Any] = hidden_act
        SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
        SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
        SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
        SCREAMING_SNAKE_CASE : int = is_training
        SCREAMING_SNAKE_CASE : Dict = num_labels
        SCREAMING_SNAKE_CASE : Dict = initializer_range
        SCREAMING_SNAKE_CASE : Optional[int] = scope

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        SCREAMING_SNAKE_CASE : Optional[int] = None
        SCREAMING_SNAKE_CASE : Dict = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
            SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )

        SCREAMING_SNAKE_CASE : int = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def UpperCamelCase_ ( self, A, A, A, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
        model.to(A )
        model.eval()
        SCREAMING_SNAKE_CASE : Optional[int] = model(A )
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def UpperCamelCase_ ( self, A, A, A, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = self.num_labels
        SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
        model.to(A )
        model.eval()
        SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def UpperCamelCase_ ( self, A, A, A, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
        SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
        model.to(A )
        model.eval()
        SCREAMING_SNAKE_CASE : str = model(A )
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        SCREAMING_SNAKE_CASE : int = model(A, labels=A )
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
        SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''

    A : Tuple = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    A : List[Any] = (
        {
            '''feature-extraction''': MobileViTModel,
            '''image-classification''': MobileViTForImageClassification,
            '''image-segmentation''': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    A : Optional[int] = False
    A : Dict = False
    A : List[Any] = False
    A : Optional[int] = False

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
        SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViT does not use inputs_embeds' )
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='MobileViT does not support input and output embeddings' )
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='MobileViT does not output attentions' )
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        pass

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
            SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]

            SCREAMING_SNAKE_CASE : Any = ['pixel_values']
            self.assertListEqual(arg_names[:1], A )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        pass

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        def check_hidden_states_output(A, A, A ):
            SCREAMING_SNAKE_CASE : Any = model_class(A )
            model.to(A )
            model.eval()

            with torch.no_grad():
                SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )

            SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states

            SCREAMING_SNAKE_CASE : List[str] = 5
            self.assertEqual(len(A ), A )

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            SCREAMING_SNAKE_CASE : int = 2
            for i in range(len(A ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2 )

        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Tuple = True
            check_hidden_states_output(A, A, A )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE : Optional[Any] = True

            check_hidden_states_output(A, A, A )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*A )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
            self.assertIsNotNone(A )


def lowercase__( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
class _a ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )

        SCREAMING_SNAKE_CASE : Any = self.default_image_processor
        SCREAMING_SNAKE_CASE : Dict = prepare_img()
        SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )

        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Tuple = model(**A )

        # verify the logits
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, A )

        SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )

        self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )

        SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )

        SCREAMING_SNAKE_CASE : str = prepare_img()
        SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )

        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Dict = model(**A )
        SCREAMING_SNAKE_CASE : List[str] = outputs.logits

        # verify the logits
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape, A )

        SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
            [
                [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
                [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
                [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
            ],
            device=A,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        SCREAMING_SNAKE_CASE : List[str] = model.to(A )

        SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )

        SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
        SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )

        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )

        SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()

        SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
        SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape, A )

        SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
        SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape, A )
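# Hedged usage sketch (not part of the dataset row above), mirroring the slow
# integration test with the public checkpoint it already names; the blank test
# image is an assumption so the snippet stays self-contained (still needs the
# checkpoint download to run).
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
inputs = processor(images=Image.new("RGB", (256, 256)), return_tensors="pt")
logits = model(**inputs).logits  # shape (1, 1000), as the test asserts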
28
1
'''simple docstring'''
UpperCamelCase_ = [
    [0, 1_6, 1_3, 0, 0, 0],
    [0, 0, 1_0, 1_2, 0, 0],
    [0, 4, 0, 0, 1_4, 0],
    [0, 0, 9, 0, 0, 2_0],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Dict ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Union[str, Any] = [False] * len(__UpperCamelCase )
    SCREAMING_SNAKE_CASE : Optional[int] = [s]
    SCREAMING_SNAKE_CASE : Optional[Any] = True

    while queue:
        SCREAMING_SNAKE_CASE : Tuple = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(__UpperCamelCase )
                SCREAMING_SNAKE_CASE : int = True
                SCREAMING_SNAKE_CASE : Any = u

    return visited[t]


def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: List[Any] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = [-1] * (len(__UpperCamelCase ))
    SCREAMING_SNAKE_CASE : int = 0
    SCREAMING_SNAKE_CASE : Dict = []
    SCREAMING_SNAKE_CASE : List[Any] = [i[:] for i in graph]  # Record original cut, copy.

    while bfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
        SCREAMING_SNAKE_CASE : str = float('Inf' )
        SCREAMING_SNAKE_CASE : str = sink

        while s != source:
            # Find the minimum value in select path
            SCREAMING_SNAKE_CASE : List[str] = min(__UpperCamelCase ,graph[parent[s]][s] )
            SCREAMING_SNAKE_CASE : Optional[Any] = parent[s]

        max_flow += path_flow
        SCREAMING_SNAKE_CASE : Union[str, Any] = sink

        while v != source:
            SCREAMING_SNAKE_CASE : Any = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            SCREAMING_SNAKE_CASE : Dict = parent[v]

    for i in range(len(__UpperCamelCase ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
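# Hedged usage note (not part of the dataset row above): in the un-obfuscated
# original the two functions are `bfs` and `mincut` (Edmonds-Karp style BFS
# augmenting paths), which is why the __main__ block calls names the obfuscated
# definitions above no longer carry.  `mincut` also mutates the matrix it
# receives, so a caller who wants to keep the capacities must copy first:
#
#     capacities = [row[:] for row in test_graph]
#     cut_edges = mincut([row[:] for row in test_graph], source=0, sink=5)
#     # cut_edges lists the (u, v) pairs saturated by the maximum flow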
28
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

UpperCamelCase_ = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

UpperCamelCase_ = {
    "distilbert-base-uncased": 5_1_2,
    "distilbert-base-uncased-distilled-squad": 5_1_2,
    "distilbert-base-cased": 5_1_2,
    "distilbert-base-cased-distilled-squad": 5_1_2,
    "distilbert-base-german-cased": 5_1_2,
    "distilbert-base-multilingual-cased": 5_1_2,
}

UpperCamelCase_ = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class _a ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    A : List[Any] = VOCAB_FILES_NAMES
    A : Dict = PRETRAINED_VOCAB_FILES_MAP
    A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
    A : Optional[int] = ['''input_ids''', '''attention_mask''']
    A : List[Any] = DistilBertTokenizer

    def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ):
        '''simple docstring'''
        super().__init__(
            A,
            tokenizer_file=A,
            do_lower_case=A,
            unk_token=A,
            sep_token=A,
            pad_token=A,
            cls_token=A,
            mask_token=A,
            tokenize_chinese_chars=A,
            strip_accents=A,
            **A,
        )

        SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase', A ) != do_lower_case
            or normalizer_state.get('strip_accents', A ) != strip_accents
            or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars
        ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, normalizer_state.pop('type' ) )
            SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
            SCREAMING_SNAKE_CASE : List[str] = strip_accents
            SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars
            SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A )

        SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case

    def UpperCamelCase_ ( self, A, A=None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def UpperCamelCase_ ( self, A, A = None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
        SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCamelCase_ ( self, A, A = None ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A )
        return tuple(A )
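# Hedged note (not part of the dataset row above): the obfuscation collapsed
# the distinct arguments token_ids_0 / token_ids_1 into a single `token_ids_a`
# in the two special-token methods above, so the pair branch would append the
# first sequence twice and the token-type method would try to concatenate None
# into a list; the un-obfuscated DistilBertTokenizerFast keeps the two
# arguments separate.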
28
1
'''simple docstring'''
from __future__ import annotations

import math


def lowercase__( __UpperCamelCase: int ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


UpperCamelCase_ = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]


def lowercase__( __UpperCamelCase: int ):
    """simple docstring"""
    if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
        raise ValueError('n must be an integer' )
    if n <= 0:
        raise ValueError('n must be >= 0' )

    SCREAMING_SNAKE_CASE : Optional[Any] = []
    for num in range(len(__UpperCamelCase ) ):
        SCREAMING_SNAKE_CASE : List[Any] = 0
        while 2 * i * i <= odd_composites[num]:
            SCREAMING_SNAKE_CASE : Any = odd_composites[num] - 2 * i * i
            if is_prime(__UpperCamelCase ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(__UpperCamelCase ) == n:
                return list_nums

    return []


def lowercase__( ):
    """simple docstring"""
    return compute_nums(1 )[0]


if __name__ == "__main__":
    print(F"""{solution() = }""")
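# Hedged note (not part of the dataset row above): this is Project Euler 46,
# Goldbach's other conjecture -- every odd composite n supposedly equals a
# prime plus twice a square.  E.g. 33 = 31 + 2 * 1**2, and indeed
# is_prime(33 - 2 * 1 * 1) is True.  `compute_nums(1)` (the de-obfuscated name
# behind the second `lowercase__`) searches for the first odd composite with
# no such decomposition; the widely quoted answer for this problem is 5777.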
28
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


UpperCamelCase_ = get_tests_dir("fixtures")


class _a ( unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock()
        SCREAMING_SNAKE_CASE : List[Any] = 500
        SCREAMING_SNAKE_CASE : Optional[Any] = {}
        SCREAMING_SNAKE_CASE : Any = HTTPError
        SCREAMING_SNAKE_CASE : Any = {}

        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=A ) as mock_head:
            SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        with self.assertRaises(A ):
            # config is in subfolder, the following should not work without specifying the subfolder
            SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )

        SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )

        self.assertIsNotNone(A )


@is_staging_test
class _a ( unittest.TestCase ):
    '''simple docstring'''

    @classmethod
    def UpperCamelCase_ ( cls ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = TOKEN
        HfFolder.save_token(A )

    @classmethod
    def UpperCamelCase_ ( cls ):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token, repo_id='test-image-processor' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
        except HTTPError:
            pass

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

        # Reset repo
        delete_repo(token=self._token, repo_id='test-image-processor' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )

        SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A, getattr(A, A ) )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        CustomImageProcessor.register_for_auto_class()
        SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )

        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'},
        )

        SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
            F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
28
1
'''simple docstring'''
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def lowercase__( __UpperCamelCase: Tuple ):
    """simple docstring"""
    return x + 2


class _a ( unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = 'x = 3'
        SCREAMING_SNAKE_CASE : Optional[Any] = {}
        SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(A, {}, state=A )
        assert result == 3
        self.assertDictEqual(A, {'x': 3} )

        SCREAMING_SNAKE_CASE : Any = 'x = y'
        SCREAMING_SNAKE_CASE : Tuple = {'y': 5}
        SCREAMING_SNAKE_CASE : Dict = evaluate(A, {}, state=A )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(A, {'x': 5, 'y': 5} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Dict = 'y = add_two(x)'
        SCREAMING_SNAKE_CASE : str = {'x': 3}
        SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(A, {'add_two': add_two}, state=A )
        assert result == 5
        self.assertDictEqual(A, {'x': 3, 'y': 5} )

        # Won't work without the tool
        with CaptureStdout() as out:
            SCREAMING_SNAKE_CASE : Dict = evaluate(A, {}, state=A )
        assert result is None
        assert "tried to execute add_two" in out.out

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = 'x = 3'
        SCREAMING_SNAKE_CASE : Optional[Any] = {}
        SCREAMING_SNAKE_CASE : Dict = evaluate(A, {}, state=A )
        assert result == 3
        self.assertDictEqual(A, {'x': 3} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
        SCREAMING_SNAKE_CASE : List[Any] = {'x': 3}
        SCREAMING_SNAKE_CASE : List[Any] = evaluate(A, {'add_two': add_two}, state=A )
        self.assertDictEqual(A, {'x': 3, 'y': 5} )
        self.assertDictEqual(A, {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = 'x = 3\ny = 5'
        SCREAMING_SNAKE_CASE : Tuple = {}
        SCREAMING_SNAKE_CASE : str = evaluate(A, {}, state=A )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(A, {'x': 3, 'y': 5} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = 'text = f\'This is x: {x}.\''
        SCREAMING_SNAKE_CASE : Any = {'x': 3}
        SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(A, {}, state=A )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(A, {'x': 3, 'text': 'This is x: 3.'} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = 'if x <= 3:\n    y = 2\nelse:\n    y = 5'
        SCREAMING_SNAKE_CASE : List[Any] = {'x': 3}
        SCREAMING_SNAKE_CASE : List[str] = evaluate(A, {}, state=A )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(A, {'x': 3, 'y': 2} )

        SCREAMING_SNAKE_CASE : Optional[int] = {'x': 8}
        SCREAMING_SNAKE_CASE : str = evaluate(A, {}, state=A )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(A, {'x': 8, 'y': 5} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = 'test_list = [x, add_two(x)]'
        SCREAMING_SNAKE_CASE : Dict = {'x': 3}
        SCREAMING_SNAKE_CASE : List[str] = evaluate(A, {'add_two': add_two}, state=A )
        self.assertListEqual(A, [3, 5] )
        self.assertDictEqual(A, {'x': 3, 'test_list': [3, 5]} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = 'y = x'
        SCREAMING_SNAKE_CASE : Any = {'x': 3}
        SCREAMING_SNAKE_CASE : Dict = evaluate(A, {}, state=A )
        assert result == 3
        self.assertDictEqual(A, {'x': 3, 'y': 3} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = 'test_list = [x, add_two(x)]\ntest_list[1]'
        SCREAMING_SNAKE_CASE : Any = {'x': 3}
        SCREAMING_SNAKE_CASE : str = evaluate(A, {'add_two': add_two}, state=A )
        assert result == 5
        self.assertDictEqual(A, {'x': 3, 'test_list': [3, 5]} )

        SCREAMING_SNAKE_CASE : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
        SCREAMING_SNAKE_CASE : Optional[int] = {'x': 3}
        SCREAMING_SNAKE_CASE : List[Any] = evaluate(A, {'add_two': add_two}, state=A )
        assert result == 5
        self.assertDictEqual(A, {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = 'x = 0\nfor i in range(3):\n    x = i'
        SCREAMING_SNAKE_CASE : Optional[Any] = {}
        SCREAMING_SNAKE_CASE : List[str] = evaluate(A, {'range': range}, state=A )
        assert result == 2
        self.assertDictEqual(A, {'x': 2, 'i': 2} )
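# Hedged usage sketch (not part of the dataset row above): the interpreter
# under test runs restricted Python against an explicit tool namespace and a
# mutable state dict, exactly as the assertions above pin down; `add_two` is
# redefined here so the snippet stays self-contained.
from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {}
result = evaluate("y = add_two(3)", {"add_two": add_two}, state=state)
assert result == 5 and state == {"y": 5}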
28
'''simple docstring'''
class _a :
    '''simple docstring'''

    def __init__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = val
        SCREAMING_SNAKE_CASE : Any = None
        SCREAMING_SNAKE_CASE : Union[str, Any] = None

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    SCREAMING_SNAKE_CASE : Optional[int] = Node(A )
                else:
                    self.left.insert(A )
            elif val > self.val:
                if self.right is None:
                    SCREAMING_SNAKE_CASE : int = Node(A )
                else:
                    self.right.insert(A )
        else:
            SCREAMING_SNAKE_CASE : int = val


def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ):
    """simple docstring"""
    if root:
        inorder(root.left ,__UpperCamelCase )
        res.append(root.val )
        inorder(root.right ,__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[Any] ):
    """simple docstring"""
    if len(__UpperCamelCase ) == 0:
        return arr
    SCREAMING_SNAKE_CASE : Optional[int] = Node(arr[0] )
    for i in range(1 ,len(__UpperCamelCase ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    SCREAMING_SNAKE_CASE : Dict = []
    inorder(__UpperCamelCase ,__UpperCamelCase )
    return res


if __name__ == "__main__":
    print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
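# Hedged note (not part of the dataset row above): in this tree sort, equal
# keys match neither the `<` nor the `>` branch, so duplicates are silently
# dropped, and a falsy root value (0, '') trips the `if self.val:` guard and
# gets overwritten.  E.g. the de-obfuscated tree_sort([3, 1, 3, 2]) would
# return [1, 2, 3] -- one of the 3s disappears.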
28
1
'''simple docstring'''
from __future__ import annotations

from collections import deque


class _a :
    '''simple docstring'''

    def __init__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )

        for keyword in keywords:
            self.add_keyword(A )
        self.set_fail_transitions()

    def UpperCamelCase_ ( self, A, A ):
        '''simple docstring'''
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = 0
        for character in keyword:
            SCREAMING_SNAKE_CASE : Any = self.find_next_state(A, A )
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                SCREAMING_SNAKE_CASE : List[str] = len(self.adlist ) - 1
            else:
                SCREAMING_SNAKE_CASE : int = next_state
        self.adlist[current_state]["output"].append(A )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(A )
            SCREAMING_SNAKE_CASE : Optional[Any] = 0
        while q:
            SCREAMING_SNAKE_CASE : List[Any] = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(A )
                SCREAMING_SNAKE_CASE : Optional[Any] = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(A, self.adlist[child]['value'] ) is None
                    and state != 0
                ):
                    SCREAMING_SNAKE_CASE : str = self.adlist[state]['fail_state']
                SCREAMING_SNAKE_CASE : int = self.find_next_state(
                    A, self.adlist[child]['value'] )
                if self.adlist[child]["fail_state"] is None:
                    SCREAMING_SNAKE_CASE : Optional[int] = 0
                SCREAMING_SNAKE_CASE : int = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : dict = {}  # returns a dict with keywords and list of its occurrences
        SCREAMING_SNAKE_CASE : Optional[Any] = 0
        for i in range(len(A ) ):
            while (
                self.find_next_state(A, string[i] ) is None
                and current_state != 0
            ):
                SCREAMING_SNAKE_CASE : Union[str, Any] = self.adlist[current_state]['fail_state']
            SCREAMING_SNAKE_CASE : Tuple = self.find_next_state(A, string[i] )
            if next_state is None:
                SCREAMING_SNAKE_CASE : Any = 0
            else:
                SCREAMING_SNAKE_CASE : Dict = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        SCREAMING_SNAKE_CASE : Dict = []
                    result[key].append(i - len(A ) + 1 )
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
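# Hedged note (not part of the dataset row above): after obfuscation every
# method of this Aho-Corasick automaton shares the name `UpperCamelCase_`, so
# only the last definition (the text search) survives, and the constructor's
# calls to add_keyword / set_fail_transitions would fail at runtime.  In the
# un-obfuscated original the usage pattern is roughly:
#
#     automaton = Automaton(["he", "she", "his", "hers"])
#     automaton.search_in("ahishers")
#     # -> roughly {"his": [1], "she": [3], "he": [4], "hers": [4]}
#     #    (a dict of keyword -> start indices in the scanned string)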
28
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ):
    """simple docstring"""
    from .. import __version__

    SCREAMING_SNAKE_CASE : int = take_from
    SCREAMING_SNAKE_CASE : Optional[int] = ()
    if not isinstance(args[0] ,__UpperCamelCase ):
        SCREAMING_SNAKE_CASE : List[str] = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}" )

        SCREAMING_SNAKE_CASE : Tuple = None
        if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(__UpperCamelCase ),)
            SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(__UpperCamelCase ,__UpperCamelCase ):
            values += (getattr(__UpperCamelCase ,__UpperCamelCase ),)
            SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase )

    if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0:
        SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
        SCREAMING_SNAKE_CASE : Any = call_frame.filename
        SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno
        SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )

    if len(__UpperCamelCase ) == 0:
        return
    elif len(__UpperCamelCase ) == 1:
        return values[0]
    return values
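# Hedged usage note (not part of the dataset row above): the de-obfuscated
# original is diffusers' `deprecate` helper, used roughly like
#
#     value = deprecate("old_kwarg", "1.0.0", "use `new_kwarg` instead",
#                       take_from=kwargs)
#
# which pops the deprecated kwarg from `kwargs`, emits the deprecation warning,
# and raises TypeError for any leftover unexpected keyword arguments.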
28
1