Dataset schema:

| column | dtype | observed range |
| --- | --- | --- |
| `code` | string | lengths 81–54k |
| `code_codestyle` | int64 | 0–721 |
| `style_context` | string | lengths 91–41.9k |
| `style_context_codestyle` | int64 | 0–699 |
| `label` | int64 | 0–1 |
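Each row pairs a `code` source file and a `style_context` source file with integer code-style ids and a binary `label`. For orientation, a minimal sketch of reading rows with this schema via the `datasets` library; the repository id is a placeholder, since this dump does not name it:

```python
from datasets import load_dataset

# "<user>/<dataset>" is a placeholder -- the dump above does not name the repo.
ds = load_dataset("<user>/<dataset>", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])  # each `code` field holds a complete Python source file
```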
code:

```python
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


snake_case__ : Optional[Any] = ["""small""", """medium""", """large"""]

snake_case__ : List[str] = """lm_head.decoder.weight"""
snake_case__ : Union[str, Any] = """lm_head.weight"""


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
    __lowercase = torch.load(__SCREAMING_SNAKE_CASE )
    __lowercase = d.pop(__SCREAMING_SNAKE_CASE )
    os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
    torch.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )


if __name__ == "__main__":
    snake_case__ : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    snake_case__ : List[Any] = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        snake_case__ : List[Any] = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
        snake_case__ : Optional[int] = F'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
```

code_codestyle: 704
style_context:

```python
import gc
import random
import unittest

import numpy as np
import torch

from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)

enable_full_determinism()


class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    '''simple docstring'''

    _snake_case : Dict = StableUnCLIPImgaImgPipeline
    _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _snake_case : int = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _snake_case : int = frozenset([] )

    def _snake_case ( self : Tuple ):
        '''simple docstring'''
        __lowercase = 32
        __lowercase = embedder_hidden_size

        # image encoding components
        __lowercase = CLIPImageProcessor(crop_size=32 , size=32 )

        torch.manual_seed(0 )
        __lowercase = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase ,
                projection_dim=lowerCamelCase ,
                num_hidden_layers=5 ,
                num_attention_heads=4 ,
                image_size=32 ,
                intermediate_size=37 ,
                patch_size=1 ,
            ) )

        # regular denoising components
        torch.manual_seed(0 )
        __lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        __lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )

        torch.manual_seed(0 )
        __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        torch.manual_seed(0 )
        __lowercase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 ,
                eos_token_id=2 ,
                hidden_size=lowerCamelCase ,
                projection_dim=32 ,
                intermediate_size=37 ,
                layer_norm_eps=1e-05 ,
                num_attention_heads=4 ,
                num_hidden_layers=5 ,
                pad_token_id=1 ,
                vocab_size=1_000 ,
            ) )

        torch.manual_seed(0 )
        __lowercase = UNetaDConditionModel(
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") ,
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") ,
            block_out_channels=(32, 64) ,
            attention_head_dim=(2, 4) ,
            class_embed_type="projection" ,
            projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,
            cross_attention_dim=lowerCamelCase ,
            layers_per_block=1 ,
            upcast_attention=lowerCamelCase ,
            use_linear_projection=lowerCamelCase ,
        )

        torch.manual_seed(0 )
        __lowercase = DDIMScheduler(
            beta_schedule="scaled_linear" ,
            beta_start=0.0_0085 ,
            beta_end=0.012 ,
            prediction_type="v_prediction" ,
            set_alpha_to_one=lowerCamelCase ,
            steps_offset=1 ,
        )

        torch.manual_seed(0 )
        __lowercase = AutoencoderKL()

        __lowercase = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ):
        '''simple docstring'''
        if str(lowerCamelCase ).startswith("mps" ):
            __lowercase = torch.manual_seed(lowerCamelCase )
        else:
            __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )

        __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )

        if pil_image:
            __lowercase = input_image * 0.5 + 0.5
            __lowercase = input_image.clamp(0 , 1 )
            __lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            __lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __lowercase = self.get_dummy_components()
        __lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        __lowercase = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )

        __lowercase = self.get_dummy_inputs(lowerCamelCase )
        inputs.update({"image_embeds": None} )
        __lowercase = sd_pipe(**lowerCamelCase ).images
        __lowercase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        __lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self : Dict ):
        '''simple docstring'''
        __lowercase = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )

    def _snake_case ( self : str ):
        '''simple docstring'''
        __lowercase = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,
        reason="XFormers attention is only available with CUDA and `xformers` installed" ,
    )
    def _snake_case ( self : str ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )


@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    '''simple docstring'''

    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self : Any ):
        '''simple docstring'''
        __lowercase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        __lowercase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
        __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )

        __lowercase = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )

    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        __lowercase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
        __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )

        __lowercase = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )

    def _snake_case ( self : str ):
        '''simple docstring'''
        __lowercase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        __lowercase = pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = pipe(
            lowerCamelCase ,
            "anime turtle" ,
            num_inference_steps=2 ,
            output_type="np" ,
        )

        __lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
```

style_context_codestyle: 655

label: 0

---
code:

```python
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer


snake_case__ : str = logging.get_logger(__name__)

snake_case__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

snake_case__ : List[str] = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
        ),
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
        ),
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
        ),
        "bert-base-multilingual-cased": (
            "https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-cased": (
            "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
        ),
    },
}

snake_case__ : str = {
    "bert-base-uncased": 5_12,
    "bert-large-uncased": 5_12,
    "bert-base-cased": 5_12,
    "bert-large-cased": 5_12,
    "bert-base-multilingual-uncased": 5_12,
    "bert-base-multilingual-cased": 5_12,
    "bert-base-chinese": 5_12,
    "bert-base-german-cased": 5_12,
    "bert-large-uncased-whole-word-masking": 5_12,
    "bert-large-cased-whole-word-masking": 5_12,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
    "bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
    "bert-base-cased-finetuned-mrpc": 5_12,
    "bert-base-german-dbmdz-cased": 5_12,
    "bert-base-german-dbmdz-uncased": 5_12,
    "TurkuNLP/bert-base-finnish-cased-v1": 5_12,
    "TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
    "wietsedv/bert-base-dutch-cased": 5_12,
}

snake_case__ : Union[str, Any] = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}


class _A ( _UpperCAmelCase ):
    '''simple docstring'''

    _snake_case : Tuple = VOCAB_FILES_NAMES
    _snake_case : str = PRETRAINED_VOCAB_FILES_MAP
    _snake_case : List[Any] = PRETRAINED_INIT_CONFIGURATION
    _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _snake_case : Tuple = BertTokenizer

    def __init__( self : List[str] , lowerCamelCase : int=None , lowerCamelCase : Tuple=None , lowerCamelCase : str=True , lowerCamelCase : Union[str, Any]="[UNK]" , lowerCamelCase : int="[SEP]" , lowerCamelCase : Dict="[PAD]" , lowerCamelCase : List[str]="[CLS]" , lowerCamelCase : Dict="[MASK]" , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[Any]=None , **lowerCamelCase : str , ):
        '''simple docstring'''
        super().__init__(
            A_ ,
            tokenizer_file=A_ ,
            do_lower_case=A_ ,
            unk_token=A_ ,
            sep_token=A_ ,
            pad_token=A_ ,
            cls_token=A_ ,
            mask_token=A_ ,
            tokenize_chinese_chars=A_ ,
            strip_accents=A_ ,
            **A_ ,
        )

        __lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , A_ ) != do_lower_case
            or normalizer_state.get("strip_accents" , A_ ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , A_ ) != tokenize_chinese_chars
        ):
            __lowercase = getattr(A_ , normalizer_state.pop("type" ) )
            __lowercase = do_lower_case
            __lowercase = strip_accents
            __lowercase = tokenize_chinese_chars
            __lowercase = normalizer_class(**A_ )

        __lowercase = do_lower_case

    def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : Dict=None ):
        '''simple docstring'''
        __lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def _snake_case ( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
        '''simple docstring'''
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
        '''simple docstring'''
        __lowercase = self._tokenizer.model.save(A_ , name=A_ )
        return tuple(A_ )
```

code_codestyle: 705
style_context:

```python
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class _A ( _lowercase , _lowercase ):
    '''simple docstring'''

    @register_to_config
    def __init__( self : Optional[Any] , *, lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ):
        '''simple docstring'''
        super().__init__()

        __lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) )

        # parameters for additional clip time embeddings
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )

        # parameters for encoder hidden states
        __lowercase = clip_extra_context_tokens
        __lowercase = nn.Linear(
            lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
        __lowercase = nn.LayerNorm(lowerCamelCase )

    def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            __lowercase = image_embeddings.shape[0]
            __lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            __lowercase = classifier_free_guidance_embeddings.expand(
                lowerCamelCase , -1 )
            __lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        __lowercase = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        __lowercase = self.embedding_proj(lowerCamelCase )
        __lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase )
        __lowercase = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        __lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase )
        __lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens )
        __lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 )

        __lowercase = self.encoder_hidden_states_proj(lowerCamelCase )
        __lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase )
        __lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )

        return text_encoder_hidden_states, additive_clip_time_embeddings
```

style_context_codestyle: 655

label: 0

---
code:

```python
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


snake_case__ : Tuple = random.Random()


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
    if rng is None:
        __lowercase = global_rng

    __lowercase = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values


class _A ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple=7 , lowerCamelCase : Union[str, Any]=400 , lowerCamelCase : Optional[int]=2_000 , lowerCamelCase : Union[str, Any]=1 , lowerCamelCase : Tuple=0.0 , lowerCamelCase : Union[str, Any]=16_000 , lowerCamelCase : Any=True , lowerCamelCase : str=True , ):
        '''simple docstring'''
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = min_seq_length
        __lowercase = max_seq_length
        __lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __lowercase = feature_size
        __lowercase = padding_value
        __lowercase = sampling_rate
        __lowercase = return_attention_mask
        __lowercase = do_normalize

    def _snake_case ( self : List[Any] ):
        '''simple docstring'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def _snake_case ( self : Optional[int] , lowerCamelCase : List[str]=False , lowerCamelCase : Union[str, Any]=False ):
        '''simple docstring'''

        def _flatten(lowerCamelCase : Optional[Any] ):
            return list(itertools.chain(*lowerCamelCase ) )

        if equal_length:
            __lowercase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __lowercase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            __lowercase = [np.asarray(lowerCamelCase ) for x in speech_inputs]

        return speech_inputs


class _A ( __lowercase , unittest.TestCase ):
    '''simple docstring'''

    _snake_case : List[Any] = WavaVecaFeatureExtractor

    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = WavaVecaFeatureExtractionTester(self )

    def _snake_case ( self : Union[str, Any] , lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(lowerCamelCase , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(lowerCamelCase , axis=0 ) - 1 ) < 1e-3 ) )

    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        __lowercase = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]

        # Test not batched input
        __lowercase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        __lowercase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) )

        # Test batched
        __lowercase = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
        __lowercase = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
            self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        __lowercase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        __lowercase = np.asarray(lowerCamelCase )
        __lowercase = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
        __lowercase = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
            self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) )

    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]

        __lowercase = ["longest", "max_length", "do_not_pad"]
        __lowercase = [None, 1_600, None]
        for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
            __lowercase = feat_extract(lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_tensors="np" )
            __lowercase = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_000] )
            self.assertTrue(input_values[0][1_000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_200] )

    def _snake_case ( self : Dict ):
        '''simple docstring'''
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = range(800 , 1_400 , 200 )
        __lowercase = [floats_list((1, x) )[0] for x in lengths]

        __lowercase = ["longest", "max_length", "do_not_pad"]
        __lowercase = [None, 1_600, None]

        for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
            __lowercase = feat_extract(lowerCamelCase , max_length=lowerCamelCase , padding=lowerCamelCase )
            __lowercase = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1_000] )
            self._check_zero_mean_unit_variance(input_values[2][:1_200] )

    def _snake_case ( self : List[str] ):
        '''simple docstring'''
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        __lowercase = feat_extract(
            lowerCamelCase , truncation=lowerCamelCase , max_length=1_000 , padding="max_length" , return_tensors="np" )
        __lowercase = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def _snake_case ( self : int ):
        '''simple docstring'''
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        __lowercase = feat_extract(
            lowerCamelCase , truncation=lowerCamelCase , max_length=1_000 , padding="longest" , return_tensors="np" )
        __lowercase = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1_000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_000) )

        __lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        __lowercase = feat_extract(
            lowerCamelCase , truncation=lowerCamelCase , max_length=2_000 , padding="longest" , return_tensors="np" )
        __lowercase = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1_000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_200) )

    @require_torch
    def _snake_case ( self : Optional[int] ):
        '''simple docstring'''
        import torch

        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = np.random.rand(100 ).astype(np.floataa )
        __lowercase = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            __lowercase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __lowercase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    @slow
    @require_torch
    def _snake_case ( self : Any ):
        '''simple docstring'''
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            __lowercase = WavaVecaConfig.from_pretrained(lowerCamelCase )
            __lowercase = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase )

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == "layer" )
```

code_codestyle: 706
style_context:

```python
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")


class _A ( Generic[T, U] ):
    '''simple docstring'''

    def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
        '''simple docstring'''
        __lowercase = key
        __lowercase = val
        __lowercase = None
        __lowercase = None

    def __repr__( self : Any ):
        '''simple docstring'''
        return (
            f"""Node: key: {self.key}, val: {self.val}, """
            f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
        )


class _A ( Generic[T, U] ):
    '''simple docstring'''

    def __init__( self : Dict ):
        '''simple docstring'''
        __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
        __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
        __lowercase , __lowercase = self.rear, self.head

    def __repr__( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = ["DoubleLinkedList"]
        __lowercase = self.head
        while node.next is not None:
            rep.append(str(lowerCamelCase ) )
            __lowercase = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(lowerCamelCase )

    def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
        '''simple docstring'''
        __lowercase = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        __lowercase = node
        __lowercase = previous
        __lowercase = node
        __lowercase = self.rear

    def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
        '''simple docstring'''
        if node.prev is None or node.next is None:
            return None

        __lowercase = node.next
        __lowercase = node.prev
        __lowercase = None
        __lowercase = None
        return node


class _A ( Generic[T, U] ):
    '''simple docstring'''

    _snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__( self : List[Any] , lowerCamelCase : int ):
        '''simple docstring'''
        __lowercase = DoubleLinkedList()
        __lowercase = capacity
        __lowercase = 0
        __lowercase = 0
        __lowercase = 0
        __lowercase = {}

    def __repr__( self : Optional[Any] ):
        '''simple docstring'''
        return (
            f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
            f"""capacity={self.capacity}, current size={self.num_keys})"""
        )

    def __contains__( self : Dict , lowerCamelCase : T ):
        '''simple docstring'''
        return key in self.cache

    def _snake_case ( self : List[Any] , lowerCamelCase : T ):
        '''simple docstring'''
        if key in self.cache:
            self.hits += 1
            __lowercase = self.cache[key]
            __lowercase = self.list.remove(self.cache[key] )
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(lowerCamelCase )
            return node.val
        self.miss += 1
        return None

    def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
        '''simple docstring'''
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                __lowercase = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(lowerCamelCase ) is not None
                )  # node guaranteed to be in list

                assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1

            __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            __lowercase = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            __lowercase = value
            self.list.add(lowerCamelCase )

    @classmethod
    def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
        '''simple docstring'''

        def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    __lowercase = LRUCache(lowerCamelCase )

                __lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    __lowercase = func(*lowerCamelCase )
                    cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(lowerCamelCase , "cache_info" , lowerCamelCase )  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
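The `style_context` above is an LRU cache built on a doubly linked list, exposed through a memoizing classmethod decorator. A usage sketch, assuming the de-obfuscated original (class `LRUCache` with classmethod `decorator`; in the row itself all three classes are collapsed to `_A` while their bodies still reference `DoubleLinkedList`/`DoubleLinkedListNode`, so the row does not run as-is):

```python
# Sketch only: assumes the runnable, de-obfuscated names described above.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(20))           # 6765; each distinct argument is computed once
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)
```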
style_context_codestyle: 655
label: 0

---
code:

```python
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )

    __lowercase = str(bin(_SCREAMING_SNAKE_CASE ) )[2:]  # remove the leading "0b"
    __lowercase = str(bin(_SCREAMING_SNAKE_CASE ) )[2:]  # remove the leading "0b"

    __lowercase = max(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(_SCREAMING_SNAKE_CASE ) , b_binary.zfill(_SCREAMING_SNAKE_CASE ) )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
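The `code` field above is the dataset's obfuscated rendering of a bitwise AND over binary strings; as written it does not run, since the body references `a`, `b`, `a_binary`, and `b_binary`, which the renamed signature no longer defines. A minimal usage sketch, assuming the de-obfuscated original where the function is callable as `binary_and(a, b)` (a hypothetical readable name):

```python
# Sketch only: `binary_and` stands in for the row's obfuscated `snake_case_`,
# assuming the original parameter names `a` and `b` that the body references.
print(binary_and(25, 32))  # '0b000000' -- 11001 and 100000 share no set bits
print(binary_and(37, 50))  # '0b100000'
binary_and(-1, 3)          # raises ValueError: the value of both inputs must be positive
```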
code_codestyle: 707
style_context:

```python
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
snake_case__ : Optional[Any] = logging.getLogger()


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = "\n".join(_SCREAMING_SNAKE_CASE )
    Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE )


snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""

snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class _A ( _lowercase ):
    '''simple docstring'''

    def _snake_case ( self : str , lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        __lowercase = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        __lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(lowerCamelCase , lowerCamelCase )

        __lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
        __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
        __lowercase = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
            run_generate()
            assert Path(lowerCamelCase ).exists()
            # os.remove(Path(output_file_name))

    def _snake_case ( self : Dict ):
        '''simple docstring'''
        self.run_eval_tester(lowerCamelCase )

    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
        '''simple docstring'''
        self.run_eval_tester(lowerCamelCase )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        __lowercase = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        __lowercase = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        __lowercase = Path(self.get_auto_remove_tmp_dir() )
        __lowercase = str(tmp_dir / "scores.json" )
        __lowercase = str(tmp_dir / "val.target" )
        _dump_articles(lowerCamelCase , text["en"] )
        _dump_articles(lowerCamelCase , text["de"] )
        __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
        __lowercase = f"""
            run_eval_search.py
            {model}
            {str(lowerCamelCase )}
            {str(lowerCamelCase )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )

        with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
            with CaptureStdout() as cs:
                run_search()
            __lowercase = [" num_beams | length_penalty", model, "Best score args"]
            __lowercase = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu" )
            else:
                expected_strings.extend(lowerCamelCase )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(lowerCamelCase ).exists()
            os.remove(Path(lowerCamelCase ) )
```

style_context_codestyle: 655

label: 0

---
code:

```python
from heapq import heappop, heappush

import numpy as np


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
    __lowercase = grid.shape
    __lowercase = [-1, 1, 0, 0]
    __lowercase = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    __lowercase = [(0, source)], set()
    __lowercase = np.full((rows, cols) , np.inf )
    __lowercase = 0
    __lowercase = np.empty((rows, cols) , dtype=_SCREAMING_SNAKE_CASE )
    __lowercase = None

    while queue:
        (__lowercase) = heappop(_SCREAMING_SNAKE_CASE )
        if (x, y) in visited:
            continue
        visited.add((x, y) )

        if (x, y) == destination:
            __lowercase = []
            while (x, y) != source:
                path.append((x, y) )
                __lowercase = predecessors[x, y]
            path.append(_SCREAMING_SNAKE_CASE )  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(_SCREAMING_SNAKE_CASE ) ):
            __lowercase = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                __lowercase = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(_SCREAMING_SNAKE_CASE , (dist + 1, (nx, ny)) )
                    __lowercase = dist + 1
                    __lowercase = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
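The `code` field above is an obfuscated grid shortest-path search (uniform-cost Dijkstra over 4- or 8-connected cells, where cells equal to 1 are traversable). A usage sketch under the assumption of the de-obfuscated original, here called `grid_shortest_path(grid, source, destination, allow_diagonal)` (hypothetical names matching the bodies' references):

```python
import numpy as np

# Sketch only: `grid_shortest_path` stands in for the row's obfuscated `snake_case_`.
grid = np.array([[1, 1, 1],
                 [0, 0, 1],
                 [1, 1, 1]])
dist, path = grid_shortest_path(grid, (0, 0), (2, 0), allow_diagonal=False)
print(dist)  # 6.0 -- the search routes around the blocked middle cells
print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]
```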
code_codestyle: 708
style_context:

```python
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class _A :
    '''simple docstring'''

    _snake_case : int
    _snake_case : TreeNode | None = None
    _snake_case : TreeNode | None = None


snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    if root is None:
        return 0

    # Validation
    def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        __lowercase , __lowercase = get_distrib(node.left )
        __lowercase , __lowercase = get_distrib(node.right )

        __lowercase = 1 - left_distrib_excess
        __lowercase = 1 - right_distrib_excess

        __lowercase = (
            left_distrib_moves
            + right_distrib_moves
            + abs(_SCREAMING_SNAKE_CASE )
            + abs(_SCREAMING_SNAKE_CASE )
        )

        __lowercase = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    return get_distrib(_SCREAMING_SNAKE_CASE )[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
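The `style_context` above is the classic "distribute coins in a binary tree" problem: each move shifts one coin along an edge, and the function returns the minimum number of moves. A usage sketch assuming the de-obfuscated original, with a `TreeNode(data, left, right)` dataclass as the bodies' `node.data`/`node.left`/`node.right` accesses imply (the obfuscated dataclass above collapses all three fields to `_snake_case`, so the row does not run as-is):

```python
# Sketch only: `distribute_coins` and `TreeNode` stand in for the row's
# obfuscated `snake_case_` and `_A`; field names follow the function bodies.
root = TreeNode(3, TreeNode(0), TreeNode(0))  # 3 coins at the root, none at the leaves
print(distribute_coins(root))  # 2 -- one coin moves down each edge
```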
style_context_codestyle: 655
label: 0

---
code:

```python
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = 3_8_4
    __lowercase = 7
    if "tiny" in model_name:
        __lowercase = 9_6
        __lowercase = (2, 2, 6, 2)
        __lowercase = (3, 6, 1_2, 2_4)
    elif "small" in model_name:
        __lowercase = 9_6
        __lowercase = (2, 2, 1_8, 2)
        __lowercase = (3, 6, 1_2, 2_4)
    elif "base" in model_name:
        __lowercase = 1_2_8
        __lowercase = (2, 2, 1_8, 2)
        __lowercase = (4, 8, 1_6, 3_2)
        __lowercase = 1_2
        __lowercase = 5_1_2
    elif "large" in model_name:
        __lowercase = 1_9_2
        __lowercase = (2, 2, 1_8, 2)
        __lowercase = (6, 1_2, 2_4, 4_8)
        __lowercase = 1_2
        __lowercase = 7_6_8

    # set label information
    __lowercase = 1_5_0
    __lowercase = 'huggingface/label-files'
    __lowercase = 'ade20k-id2label.json'
    __lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
    __lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
    __lowercase = {v: k for k, v in idalabel.items()}

    __lowercase = SwinConfig(
        embed_dim=lowerCAmelCase__ ,
        depths=lowerCAmelCase__ ,
        num_heads=lowerCAmelCase__ ,
        window_size=lowerCAmelCase__ ,
        out_features=["stage1", "stage2", "stage3", "stage4"] ,
    )
    __lowercase = UperNetConfig(
        backbone_config=lowerCAmelCase__ ,
        auxiliary_in_channels=lowerCAmelCase__ ,
        num_labels=lowerCAmelCase__ ,
        idalabel=lowerCAmelCase__ ,
        labelaid=lowerCAmelCase__ ,
    )

    return config


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )

        if i < 3:
            rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
        rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
        rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = dct.pop(lowerCAmelCase__ )
    __lowercase = val


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        __lowercase = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            __lowercase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
            __lowercase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            __lowercase = in_proj_weight[:dim, :]
            __lowercase = in_proj_bias[: dim]
            __lowercase = in_proj_weight[
                dim : dim * 2, :
            ]
            __lowercase = in_proj_bias[
                dim : dim * 2
            ]
            __lowercase = in_proj_weight[
                -dim :, :
            ]
            __lowercase = in_proj_bias[-dim :]
            # fmt: on


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = x.shape
    __lowercase = x.reshape(lowerCAmelCase__ , 4 , in_channel // 4 )
    __lowercase = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    return x


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = x.shape
    __lowercase = x.reshape(lowerCAmelCase__ , in_channel // 4 , 4 )
    __lowercase = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    return x


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = x.shape[0]
    __lowercase = x.reshape(4 , in_channel // 4 )
    __lowercase = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCAmelCase__ )
    return x


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = x.shape[0]
    __lowercase = x.reshape(in_channel // 4 , 4 )
    __lowercase = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCAmelCase__ )
    return x


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    __lowercase = model_name_to_url[model_name]
    __lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="cpu" , file_name=lowerCAmelCase__ )[
        'state_dict'
    ]

    for name, param in state_dict.items():
        print(lowerCAmelCase__ , param.shape )

    __lowercase = get_upernet_config(lowerCAmelCase__ )
    __lowercase = UperNetForSemanticSegmentation(lowerCAmelCase__ )
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        __lowercase = state_dict.pop(lowerCAmelCase__ )
        if "bn" in key:
            __lowercase = key.replace("bn" , "batch_norm" )
        __lowercase = val

    # rename keys
    __lowercase = create_rename_keys(lowerCAmelCase__ )
    for src, dest in rename_keys:
        rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    read_in_q_k_v(lowerCAmelCase__ , config.backbone_config )

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                __lowercase = reverse_correct_unfold_reduction_order(lowerCAmelCase__ )
            if "norm" in key:
                __lowercase = reverse_correct_unfold_norm_order(lowerCAmelCase__ )

    model.load_state_dict(lowerCAmelCase__ )

    # verify on image
    __lowercase = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    __lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" )

    __lowercase = SegformerImageProcessor()
    __lowercase = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values

    with torch.no_grad():
        __lowercase = model(lowerCAmelCase__ )
        __lowercase = outputs.logits

    print(logits.shape )
    print("First values of logits:" , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        __lowercase = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
    elif model_name == "upernet-swin-small":
        __lowercase = torch.tensor(
            [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
    elif model_name == "upernet-swin-base":
        __lowercase = torch.tensor(
            [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
    elif model_name == "upernet-swin-large":
        __lowercase = torch.tensor(
            [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowerCAmelCase__ )
        print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(lowerCAmelCase__ )

    if push_to_hub:
        print(F"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(F"""openmmlab/{model_name}""" )
        processor.push_to_hub(F"""openmmlab/{model_name}""" )


if __name__ == "__main__":
    snake_case__ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-swin-tiny""",
        type=str,
        choices=[F'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
        help="""Name of the Swin + UperNet model you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    snake_case__ : Any = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
```

code_codestyle: 709
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = SwinvaConfig() __lowercase = swinva_name.split("_" ) __lowercase = name_split[1] if "to" in name_split[3]: __lowercase = int(name_split[3][-3:] ) else: __lowercase = int(name_split[3] ) if "to" in name_split[2]: __lowercase = int(name_split[2][-2:] ) else: __lowercase = int(name_split[2][6:] ) if model_size == "tiny": __lowercase = 9_6 __lowercase = (2, 2, 6, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "small": __lowercase = 9_6 __lowercase = (2, 2, 1_8, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "base": __lowercase = 1_2_8 __lowercase = (2, 2, 1_8, 2) __lowercase = (4, 8, 1_6, 3_2) else: __lowercase = 1_9_2 __lowercase = (2, 2, 1_8, 2) __lowercase = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: __lowercase = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __lowercase = 2_1_8_4_1 __lowercase = "huggingface/label-files" __lowercase = "imagenet-22k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} else: __lowercase = 1_0_0_0 __lowercase = "huggingface/label-files" __lowercase = "imagenet-1k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} __lowercase = img_size __lowercase = num_classes __lowercase = embed_dim __lowercase = depths __lowercase = num_heads __lowercase = window_size return config def snake_case_ ( _SCREAMING_SNAKE_CASE ): if "patch_embed.proj" in name: __lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowercase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowercase = "encoder." + name if "attn.proj" in name: __lowercase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __lowercase = name.replace("attn" , "attention.self" ) if "norm1" in name: __lowercase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __lowercase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __lowercase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __lowercase = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: __lowercase = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: __lowercase = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: __lowercase = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: __lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": __lowercase = "layernorm.weight" if name == "norm.bias": __lowercase = "layernorm.bias" if "head" in name: __lowercase = name.replace("head" , "classifier" ) else: __lowercase = "swinv2." 
+ name return name def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for key in orig_state_dict.copy().keys(): __lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: __lowercase = key.split("." ) __lowercase = int(key_split[1] ) __lowercase = int(key_split[3] ) __lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowercase = val[:dim, :] __lowercase = val[dim : dim * 2, :] __lowercase = val[-dim:, :] else: __lowercase = val[:dim] __lowercase = val[ dim : dim * 2 ] __lowercase = val[-dim:] else: __lowercase = val return orig_state_dict def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() __lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE ) __lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() __lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) __lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) __lowercase = timm_model(inputs["pixel_values"] ) __lowercase = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub( repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case__ : str = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
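A minimal sketch of the checkpoint-name parsing the config builder above relies on; the example name is illustrative and must follow the indexing the parser assumes (window token at index 2, image size at index 3):

name = "swinv2_tiny_window8_256"
parts = name.split("_")          # ["swinv2", "tiny", "window8", "256"]
model_size = parts[1]            # "tiny" -> embed_dim 96, depths (2, 2, 6, 2)
window_size = int(parts[2][6:])  # "window8"[6:] -> 8
img_size = int(parts[3])         # 256
print(model_size, window_size, img_size)  # tiny 8 256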
655
0
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class _A ( a__ ): '''simple docstring''' _snake_case : int = 0 _snake_case : bool = False _snake_case : float = 3.0 class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=lowerCamelCase_ ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = GradScalerKwargs(init_scale=1_024 , growth_factor=2 ) AcceleratorState._reset_state() __lowercase = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) __lowercase = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_000 ) self.assertEqual(scaler._enabled , lowerCamelCase_ ) @require_multi_gpu def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(lowerCamelCase_ , env=os.environ.copy() ) if __name__ == "__main__": snake_case__ : List[str] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) snake_case__ : Any = Accelerator(kwargs_handlers=[ddp_scaler]) snake_case__ : Optional[Any] = torch.nn.Linear(1_00, 2_00) snake_case__ : Optional[Any] = accelerator.prepare(model) # Check the values changed in kwargs snake_case__ : str = """""" snake_case__ : List[Any] = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
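A minimal sketch of the dataclass-to-kwargs behavior the first test above asserts: to_kwargs() returns only the fields whose values differ from their declared defaults.

from dataclasses import dataclass

from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


assert MockClass().to_kwargs() == {}
assert MockClass(a=2).to_kwargs() == {"a": 2}
assert MockClass(a=2, b=True).to_kwargs() == {"a": 2, "b": True}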
710
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : List[str] = logging.get_logger(__name__) snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } snake_case__ : List[str] = { """allenai/led-base-16384""": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def snake_case_ ( ): __lowercase = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowercase = bs[:] __lowercase = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 __lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase = char return pairs class _A ( _lowercase ): '''simple docstring''' _snake_case : List[str] = VOCAB_FILES_NAMES _snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle: __lowercase = json.load(lowerCamelCase ) __lowercase = {v: k for k, v in self.encoder.items()} __lowercase = errors # how to handle errors in decoding __lowercase = bytes_to_unicode() __lowercase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase , encoding="utf-8" ) as merges_handle: __lowercase = merges_handle.read().split("\n" )[1:-1] __lowercase = [tuple(merge.split() ) for merge in bpe_merges] __lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) __lowercase = {} __lowercase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _snake_case ( self : Optional[int] ): '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[int] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , lowerCamelCase : str ): '''simple docstring''' if token in self.cache: return self.cache[token] __lowercase = tuple(lowerCamelCase ) __lowercase = get_pairs(lowerCamelCase ) if not pairs: return token while True: __lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowercase , __lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(lowerCamelCase ): try: __lowercase = word.index(lowerCamelCase , lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase = j if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(lowerCamelCase ) __lowercase = new_word if len(lowerCamelCase ) == 1: break else: __lowercase = get_pairs(lowerCamelCase ) __lowercase = " ".join(lowerCamelCase ) __lowercase = word return word def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ): '''simple docstring''' __lowercase = [] for token in re.findall(self.pat , lowerCamelCase ): __lowercase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : str , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.decoder.get(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = "".join(lowerCamelCase ) __lowercase = bytearray([self.byte_decoder[c] for c in text] 
).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" ) __lowercase = 0 with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowercase = token_index writer.write(" ".join(lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ): '''simple docstring''' __lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()): __lowercase = " " + text return (text, kwargs) def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' __lowercase = super()._pad( encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: __lowercase = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __lowercase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` 
need to have the same length as other (sequential) inputs. __lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase ) if needs_to_be_padded: __lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __lowercase = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __lowercase = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
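The padding rule implemented in _pad above, shown in isolation: a global_attention_mask is padded with -1 rather than 0, because 0 already means "local attention" for LED while -1 marks positions to ignore.

global_attention_mask = [1, 0, 0]  # global attention on the first token only
padded_input_len = 5               # length of input_ids after padding
difference = padded_input_len - len(global_attention_mask)
padded = global_attention_mask + [-1] * difference  # padding_side == "right"
print(padded)  # [1, 0, 0, -1, -1]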
655
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available snake_case__ : str = { """configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""], """tokenization_tapas""": ["""TapasTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = [ """TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""", """TapasForMaskedLM""", """TapasForQuestionAnswering""", """TapasForSequenceClassification""", """TapasModel""", """TapasPreTrainedModel""", """load_tf_weights_in_tapas""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Tuple = [ """TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFTapasForMaskedLM""", """TFTapasForQuestionAnswering""", """TFTapasForSequenceClassification""", """TFTapasModel""", """TFTapasPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys snake_case__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
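A short sketch of what the _LazyModule wiring above buys: importing the package stays cheap, and the torch or TF submodules are only materialized on first attribute access. Assumes a transformers build with Tapas and torch available.

import transformers  # fast: modeling code is not imported yet

config = transformers.TapasConfig()          # first access triggers the real import
tokenizer_cls = transformers.TapasTokenizer  # likewise resolved on demand
print(type(config).__name__)  # TapasConfig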
711
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError("The length of profit and weight must be the same." ) if max_weight <= 0: raise ValueError("max_weight must be greater than zero." ) if any(p < 0 for p in profit ): raise ValueError("Profit cannot be negative." ) if any(w < 0 for w in weight ): raise ValueError("Weight cannot be negative." ) # List storing the profit gained per 1 kg of each item. # Calculate and append profit/weight for each element. __lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] # Creating a copy of the list and sorting profit/weight in ascending order __lowercase = sorted(_SCREAMING_SNAKE_CASE ) # declaring useful variables __lowercase = len(_SCREAMING_SNAKE_CASE ) __lowercase = 0 __lowercase = 0 __lowercase = 0 # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length while limit <= max_weight and i < length: # pick the greatest remaining element of sorted_profit_by_weight __lowercase = sorted_profit_by_weight[length - i - 1] __lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE ) __lowercase = -1 # check if the weight encountered is less than the weight # still available under the limit. if max_weight - limit >= weight[index]: limit += weight[index] # Add the full profit for this item: 1 == weight[index] / weight[index] gain += 1 * profit[index] else: # The item no longer fits whole, so take only the remaining kgs and # compute the proportional profit: weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( """Input profits, weights, and then max_weight (all positive ints) separated by """ """spaces.""" ) snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()] snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()] snake_case__ : Optional[Any] = int(input("""Max weight allowed: """)) # Function Call print(calc_profit(profit, weight, max_weight))
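A worked example for the greedy routine above, using the deobfuscated name calc_profit that the __main__ block calls; the numbers are chosen purely for illustration.

profit = [10, 18, 24]
weight = [1, 3, 6]
# profit/weight ratios are [10.0, 6.0, 4.0]; the loop takes items in
# descending ratio: all of item 1 (1 kg) and item 2 (3 kg), then only
# 2 of the 6 kg of item 3 -> gain = 10 + 18 + (2 / 6) * 24 = 36.0
print(calc_profit(profit, weight, 6))  # 36.0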
655
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case__ : str = logging.get_logger(__name__) snake_case__ : Optional[Any] = {"vocab_file": "spiece.model"} snake_case__ : str = { "vocab_file": { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model", } } snake_case__ : Optional[Any] = { "albert-base-v1": 5_12, "albert-large-v1": 5_12, "albert-xlarge-v1": 5_12, "albert-xxlarge-v1": 5_12, "albert-base-v2": 5_12, "albert-large-v2": 5_12, "albert-xlarge-v2": 5_12, "albert-xxlarge-v2": 5_12, } snake_case__ : Union[str, Any] = "▁" class _A ( __UpperCAmelCase ): '''simple docstring''' _snake_case : Any = VOCAB_FILES_NAMES _snake_case : int = PRETRAINED_VOCAB_FILES_MAP _snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple=True , lowerCamelCase : List[str]=True , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : str="[CLS]" , lowerCamelCase : int="[SEP]" , lowerCamelCase : Dict="<unk>" , lowerCamelCase : Dict="[SEP]" , lowerCamelCase : Any="<pad>" , lowerCamelCase : Optional[int]="[CLS]" , lowerCamelCase : Any="[MASK]" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : str , ): '''simple docstring''' __lowercase = ( AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ , normalized=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token ) __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , ) __lowercase = do_lower_case __lowercase = remove_space __lowercase = keep_accents __lowercase = vocab_file __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase_ ) @property def _snake_case ( self : Optional[Any] ): '''simple docstring''' return len(self.sp_model ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): '''simple docstring''' __lowercase = self.__dict__.copy() __lowercase = None return state def __setstate__( self : Optional[Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __lowercase = {} __lowercase = 
spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self : Optional[int] , lowerCamelCase : Optional[int] ): '''simple docstring''' if self.remove_space: __lowercase = " ".join(inputs.strip().split() ) else: __lowercase = inputs __lowercase = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: __lowercase = unicodedata.normalize("NFKD" , lowerCAmelCase_ ) __lowercase = "".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase_ )] ) if self.do_lower_case: __lowercase = outputs.lower() return outputs def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase = self.preprocess_text(lowerCAmelCase_ ) __lowercase = self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ ) __lowercase = [] for piece in pieces: if len(lowerCAmelCase_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): __lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase_ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __lowercase = cur_pieces[1:] else: __lowercase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowerCAmelCase_ ) else: new_pieces.append(lowerCAmelCase_ ) return new_pieces def _snake_case ( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' return self.sp_model.PieceToId(lowerCAmelCase_ ) def _snake_case ( self : Dict , lowerCamelCase : Dict ): '''simple docstring''' return self.sp_model.IdToPiece(lowerCAmelCase_ ) def _snake_case ( self : Optional[int] , lowerCamelCase : int ): '''simple docstring''' __lowercase = [] __lowercase = "" __lowercase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase_ ) + token __lowercase = True __lowercase = [] else: current_sub_tokens.append(lowerCAmelCase_ ) __lowercase = False out_string += self.sp_model.decode(lowerCAmelCase_ ) return out_string.strip() def _snake_case ( self : Dict , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1] def _snake_case ( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return 
__lowercase = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase_ , "wb" ) as fi: __lowercase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase_ ) return (out_vocab_file,)
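The preprocessing in preprocess_text above, traced by hand for the default flags (remove_space=True, keep_accents=False, do_lower_case=True):

import unicodedata

text = "  Héllo ``world''  "
text = " ".join(text.strip().split())               # collapse whitespace
text = text.replace("``", '"').replace("''", '"')   # normalize quotes
text = unicodedata.normalize("NFKD", text)
text = "".join(c for c in text if not unicodedata.combining(c))  # drop accents
print(text.lower())  # hello "world"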
712
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = """openai/whisper-base""" _snake_case : Union[str, Any] = ( """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """ """transcribed text.""" ) _snake_case : Any = """transcriber""" _snake_case : Any = WhisperProcessor _snake_case : Optional[int] = WhisperForConditionalGeneration _snake_case : str = ["""audio"""] _snake_case : Optional[int] = ["""text"""] def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features def _snake_case ( self : str , lowerCamelCase : List[Any] ): '''simple docstring''' return self.model.generate(inputs=lowerCamelCase ) def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
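A hedged sketch of the processor/model round trip the tool above wraps (encode -> generate -> batch_decode); the checkpoint matches the default above, and a silent waveform stands in for real audio.

import numpy as np

from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # placeholder: 1 second of silence
inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
generated = model.generate(inputs.input_features)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])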
655
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case__ : Optional[Any] = logging.get_logger(__name__) class _A ( _UpperCamelCase ): '''simple docstring''' _snake_case : Optional[int] = ["""pixel_values"""] def __init__( self : str , lowerCamelCase : Tuple = True , lowerCamelCase : List[str] = None , lowerCamelCase : Tuple = None , lowerCamelCase : Optional[Any] = PILImageResampling.BILINEAR , lowerCamelCase : Union[str, Any] = True , lowerCamelCase : str = 1 / 255 , lowerCamelCase : Optional[Any] = True , lowerCamelCase : int = None , lowerCamelCase : List[str] = None , **lowerCamelCase : str , ): '''simple docstring''' super().__init__(**_UpperCAmelCase ) __lowercase = size if size is not None else {'''shortest_edge''': 384} __lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) __lowercase = do_resize __lowercase = size # Default value set here for backwards compatibility where the value in config is None __lowercase = crop_pct if crop_pct is not None else 224 / 256 __lowercase = resample __lowercase = do_rescale __lowercase = rescale_factor __lowercase = do_normalize __lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _snake_case ( self : int , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[int] = PILImageResampling.BICUBIC , lowerCamelCase : Union[str, Any] = None , **lowerCamelCase : Any , ): '''simple docstring''' __lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""" ) __lowercase = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __lowercase = int(shortest_edge / crop_pct ) __lowercase = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase ) __lowercase = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] = None , **lowerCamelCase : int , ): '''simple docstring''' return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self : Dict , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Dict = None , **lowerCamelCase : List[str] , ): '''simple docstring''' return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self : int , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] = None , lowerCamelCase : Tuple = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : str = None , lowerCamelCase : Any = None , lowerCamelCase : Optional[Any] = None , lowerCamelCase : Dict = None , lowerCamelCase : Optional[Any] = None , lowerCamelCase : List[str] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Union[str, Any] = ChannelDimension.FIRST , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' __lowercase = do_resize if do_resize is not None else self.do_resize __lowercase = crop_pct if crop_pct is not None else self.crop_pct __lowercase = resample if resample is not None else self.resample __lowercase = do_rescale if do_rescale is not None else self.do_rescale __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = image_mean if image_mean is not None else self.image_mean __lowercase = image_std if image_std is not None else self.image_std __lowercase = size if size is not None else self.size __lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) __lowercase = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and (size is None or resample is None): raise ValueError("Size and resample must be specified if do_resize is True." ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: __lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_rescale: __lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: __lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] __lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] __lowercase = {'''pixel_values''': images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
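Worked numbers for the crop_pct branch of the resize method above (the shortest_edge < 384 case):

shortest_edge = 224
crop_pct = 224 / 256  # the backwards-compatibility default from __init__
resize_edge = int(shortest_edge / crop_pct)  # int(256.0) -> 256
# the image is first resized so its short side is 256, then
# center-cropped to (224, 224); at size >= 384 it is warped instead
print(resize_edge)  # 256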
713
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _A : '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, 
"image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : str ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["prompt"] __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] if "image" in inputs: __lowercase = inputs["image"] else: __lowercase = None if "mask_image" in inputs: __lowercase = inputs["mask_image"] else: __lowercase = None if "original_image" in inputs: __lowercase = inputs["original_image"] else: __lowercase = None __lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase ) # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - 
to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 )
655
0
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask snake_case__ : Optional[int] = logging.getLogger(__name__) class _A ( _a ): '''simple docstring''' def __init__( self : int , lowerCamelCase : Optional[Any]=-1 ): '''simple docstring''' __lowercase = label_idx def _snake_case ( self : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] ): '''simple docstring''' if isinstance(_A , _A ): __lowercase = mode.value __lowercase = os.path.join(_A , f"""{mode}.txt""" ) __lowercase = 1 __lowercase = [] with open(_A , encoding="utf-8" ) as f: __lowercase = [] __lowercase = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) guid_index += 1 __lowercase = [] __lowercase = [] else: __lowercase = line.split(" " ) words.append(splits[0] ) if len(_A ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) return examples def _snake_case ( self : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Any ): '''simple docstring''' __lowercase = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(_A ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: __lowercase = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(_A ) else: logger.warning("Maximum sequence length exceeded: No prediction for \'%s\'." , line.split()[0] ) def _snake_case ( self : Optional[int] , lowerCamelCase : Optional[int] ): '''simple docstring''' if path: with open(_A , "r" ) as f: __lowercase = f.read().splitlines() if "O" not in labels: __lowercase = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _A ( _a ): '''simple docstring''' def __init__( self : Optional[int] ): '''simple docstring''' super().__init__(label_idx=-2 ) def _snake_case ( self : str , lowerCamelCase : str ): '''simple docstring''' if path: with open(_A , "r" ) as f: __lowercase = f.read().splitlines() if "O" not in labels: __lowercase = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _A ( _a ): '''simple docstring''' def _snake_case ( self : str , lowerCamelCase : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' if isinstance(_A , _A ): __lowercase = mode.value __lowercase = os.path.join(_A , f"""{mode}.txt""" ) __lowercase = 1 __lowercase = [] with open(_A , encoding="utf-8" ) as f: for sentence in parse_incr(_A ): __lowercase = [] __lowercase = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(_A ) == len(_A ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) guid_index += 1 return examples def _snake_case ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Any ): '''simple docstring''' __lowercase = 0 for sentence in parse_incr(_A ): __lowercase = preds_list[example_id] __lowercase = "" for token 
in sentence: out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(_A ) example_id += 1 def _snake_case ( self : int , lowerCamelCase : int ): '''simple docstring''' if path: with open(_A , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
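For orientation, the whitespace-separated row format the CoNLL-style reader above expects: the token in column 0 and the label in column label_idx (default -1, i.e. the last column). The example row is hypothetical.

line = "EU NNP B-NP B-ORG\n"  # CoNLL-2003 style row
splits = line.split(" ")
word = splits[0]
label = splits[-1].replace("\n", "")  # label_idx == -1
print(word, label)  # EU B-ORG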
714
import numpy as np snake_case__ : Tuple = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class _A : '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' __lowercase = np.array(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase , __lowercase = np.where(letter == self.SQUARE ) __lowercase = np.concatenate([indexa + 1, indexa + 1] ) return indexes def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' __lowercase = self.SQUARE[indexa - 1, indexa - 1] return letter def _snake_case ( self : int , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() __lowercase = message.replace(" " , "" ) __lowercase = message.replace("j" , "i" ) __lowercase = np.empty((2, len(lowerCamelCase )) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape(2 * len(lowerCamelCase ) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[numbers_index * 2] ) __lowercase = int(second_step[(numbers_index * 2) + 1] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = encoded_message + letter return encoded_message def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() __lowercase = message.replace(" " , "" ) __lowercase = np.empty(2 * len(lowerCamelCase ) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape((2, len(lowerCamelCase )) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[0, numbers_index] ) __lowercase = int(second_step[1, numbers_index] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = decoded_message + letter return decoded_message
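A self-contained sketch of the coordinate lookup the cipher above is built on (written out in full because the names in this dump are obfuscated); the 5x5 square merges i and j as usual.

import numpy as np

SQUARE = np.array([
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
])

def letter_to_numbers(letter: str) -> tuple[int, int]:
    # np.where on the 2-D equality mask yields (row_indices, col_indices)
    row, col = np.where(SQUARE == letter)
    return int(row[0]) + 1, int(col[0]) + 1  # 1-based coordinates

print(letter_to_numbers("t"))  # (4, 4)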
655
0
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : str = WavaVecaPhonemeCTCTokenizer _snake_case : List[str] = False def _snake_case ( self : str ): '''simple docstring''' super().setUp() __lowercase = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ " "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" " ) __lowercase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) __lowercase = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__UpperCamelCase ) + "\n" ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=20 , lowerCamelCase : List[Any]=5 ): '''simple docstring''' __lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__UpperCamelCase )) for i in range(len(__UpperCamelCase ) )] __lowercase = list(filter(lambda lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__UpperCamelCase ) , __UpperCamelCase ) ) if max_length is not None and len(__UpperCamelCase ) > max_length: __lowercase = toks[:max_length] if min_length is not None and len(__UpperCamelCase ) < min_length and len(__UpperCamelCase ) > 0: while len(__UpperCamelCase ) < min_length: __lowercase = toks + toks # toks_str = [t[1] for t in toks] __lowercase = [t[0] for t in toks] # Ensure consistency __lowercase = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase ) if " " not in output_txt and len(__UpperCamelCase ) > 1: __lowercase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__UpperCamelCase ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__UpperCamelCase ) ) if with_prefix_space: __lowercase = " " + output_txt 
__lowercase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) return output_txt, output_ids def _snake_case ( self : str , **lowerCamelCase : str ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) # check adding a single token tokenizer.add_tokens("xxx" ) __lowercase = tokenizer("m xxx ɪ" , do_phonemize=__UpperCamelCase ).input_ids self.assertEqual(__UpperCamelCase , [13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(["aaa", "bbb", "ccc"] ) __lowercase = tokenizer("m aaa ɪ ccc" , do_phonemize=__UpperCamelCase ).input_ids self.assertEqual(__UpperCamelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa __lowercase = tokenizer("maɪ c" , do_phonemize=__UpperCamelCase ).input_ids self.assertEqual(__UpperCamelCase , [3, 200] ) # mai should be <unk> (=3) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) __lowercase = "Hello how are you" __lowercase = tokenizer.phonemize(__UpperCamelCase , phonemizer_lang="en-us" ) self.assertEqual(__UpperCamelCase , "h ə l oʊ h aʊ ɑːɹ j uː" ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) __lowercase = "Hello how are you" __lowercase = tokenizer.phonemize(__UpperCamelCase , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(__UpperCamelCase ).input_ids , tokenizer(__UpperCamelCase , do_phonemize=__UpperCamelCase ).input_ids ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) __lowercase = "Hello how are you" __lowercase = tokenizer.phonemize(__UpperCamelCase , phonemizer_lang="en-us" ) __lowercase = tokenizer.decode(tokenizer(__UpperCamelCase ).input_ids ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) __lowercase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] __lowercase = tokenizer.decode(sample_ids[0] ) __lowercase = tokenizer.batch_decode(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , batch_tokens[0] ) self.assertEqual(__UpperCamelCase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) __lowercase = "Hello how are you" __lowercase = tokenizer.phonemize(__UpperCamelCase , phonemizer_lang="en-us" ) self.assertEqual(__UpperCamelCase , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) __lowercase = "Hello how are you" __lowercase = tokenizer.phonemize(__UpperCamelCase , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(__UpperCamelCase ).input_ids , tokenizer(__UpperCamelCase , do_phonemize=__UpperCamelCase ).input_ids ) def _snake_case ( self : Union[str, Any] 
): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) # fmt: off __lowercase = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter __lowercase = tokenizer.decode(sample_ids[0] ) __lowercase = tokenizer.batch_decode(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , batch_tokens[0] ) self.assertEqual(__UpperCamelCase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] ) # decode with no word_del_token filter __lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__UpperCamelCase ) __lowercase = tokenizer.batch_decode(__UpperCamelCase , filter_word_delimiter_token=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , batch_tokens[0] ) self.assertEqual(__UpperCamelCase , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) __lowercase = "Hello how are you" __lowercase = tokenizer.phonemize(__UpperCamelCase , phonemizer_lang="en-us" ) __lowercase = tokenizer.decode(tokenizer(__UpperCamelCase ).input_ids , filter_word_delimiter_token=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) __lowercase = "Hello how are you" __lowercase = tokenizer.phonemize(__UpperCamelCase , phonemizer_lang="en-us" ) __lowercase = tokenizer.decode(tokenizer(__UpperCamelCase ).input_ids , filter_word_delimiter_token=__UpperCamelCase ) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , __UpperCamelCase ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__UpperCamelCase ) __lowercase = "Hello how are you" __lowercase = tokenizer(__UpperCamelCase , phonemizer_lang="en-us" ).input_ids __lowercase = tokenizer(__UpperCamelCase , phonemizer_lang="fr-fr" ).input_ids self.assertNotEqual(__UpperCamelCase , __UpperCamelCase ) __lowercase = tokenizer.decode(__UpperCamelCase ) __lowercase = tokenizer.decode(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , "h ə l oʊ h aʊ ɑːɹ j uː" ) self.assertEqual(__UpperCamelCase , "ɛ l o h aʊ a ʁ j u" ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) __lowercase = "Hello how Are you" __lowercase = "hello how are you" __lowercase = tokenizer(__UpperCamelCase ).input_ids __lowercase = tokenizer(__UpperCamelCase ).input_ids self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) tokenizer.add_tokens(["!", "?"] ) tokenizer.add_special_tokens({"cls_token": "$$$"} ) # fmt: off __lowercase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # 
fmt: on __lowercase = tokenizer.batch_decode(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] ) @staticmethod def _snake_case ( lowerCamelCase : str , lowerCamelCase : int ): '''simple docstring''' __lowercase = [d[key] for d in offsets] return retrieved_list def _snake_case ( self : int ): '''simple docstring''' __lowercase = self.get_tokenizer(word_delimiter_token="|" ) tokenizer.add_tokens("|" ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" __lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on __lowercase = tokenizer.decode(__UpperCamelCase , output_char_offsets=__UpperCamelCase , filter_word_delimiter_token=__UpperCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue("text" in outputs ) self.assertTrue("char_offsets" in outputs ) self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = self.get_tokenizer(word_delimiter_token="|" ) def check_list_tuples_equal(lowerCamelCase : int , lowerCamelCase : int ): self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) ) self.assertTrue(isinstance(outputs_list[0] , __UpperCamelCase ) ) # transform list to ModelOutput __lowercase = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] ) def recursive_check(lowerCamelCase : Dict , lowerCamelCase : Optional[Any] ): if isinstance(__UpperCamelCase , __UpperCamelCase ): [recursive_check(__UpperCamelCase , __UpperCamelCase ) for la, la in zip(__UpperCamelCase , __UpperCamelCase )] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] ) # fmt: off __lowercase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char __lowercase = tokenizer.batch_decode(__UpperCamelCase , output_char_offsets=__UpperCamelCase ) __lowercase = [tokenizer.decode(__UpperCamelCase , output_char_offsets=__UpperCamelCase ) for ids in sample_ids] check_list_tuples_equal(__UpperCamelCase , __UpperCamelCase ) @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" ) def _snake_case ( self : str ): '''simple docstring''' pass @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" ) def _snake_case ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" ) def _snake_case ( self : Dict ): '''simple docstring''' pass @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" ) def _snake_case ( self : int ): '''simple docstring''' pass def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = self.get_tokenizers(do_lower_case=__UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowercase = tokenizer.vocab_size __lowercase = len(__UpperCamelCase ) self.assertNotEqual(__UpperCamelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __lowercase = ["aaaaa bbbbbb", "cccccccccdddddddd"] __lowercase = tokenizer.add_tokens(__UpperCamelCase ) __lowercase = tokenizer.vocab_size __lowercase = len(__UpperCamelCase ) self.assertNotEqual(__UpperCamelCase , 0 ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , all_size + len(__UpperCamelCase ) ) __lowercase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__UpperCamelCase ) self.assertGreaterEqual(len(__UpperCamelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __lowercase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} __lowercase = tokenizer.add_special_tokens(__UpperCamelCase ) __lowercase = tokenizer.vocab_size __lowercase = len(__UpperCamelCase ) self.assertNotEqual(__UpperCamelCase , 0 ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , all_size_a + len(__UpperCamelCase ) ) __lowercase = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__UpperCamelCase ) self.assertGreaterEqual(len(__UpperCamelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." ) def _snake_case ( self : List[str] ): '''simple docstring''' pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." 
) def _snake_case ( self : Optional[Any] ): '''simple docstring''' pass def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = self.get_tokenizers(fast=__UpperCamelCase , do_lower_case=__UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowercase = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] __lowercase = tokenizer.convert_tokens_to_string(__UpperCamelCase ) self.assertIsInstance(output["text"] , __UpperCamelCase )
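Taken together, these tests pin down the tokenizer's round-trip contract: text is phonemized, encoded to ids, and CTC-style decoded back to a phoneme string. A minimal usage sketch of that contract, with names restored from the underlying transformers API and assuming the checkpoint plus an installed espeak/phonemizer backend:

from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
ids = tokenizer("Hello how are you", phonemizer_lang="en-us").input_ids
print(tokenizer.decode(ids))  # expected per the tests above: "h ə l oʊ h aʊ ɑːɹ j uː"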
715
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _A ( ctypes.Structure ): '''simple docstring''' _snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def snake_case_ ( ): if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) __lowercase = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("\033[?25l" ) sys.stdout.flush() def snake_case_ ( ): if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) __lowercase = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("\033[?25h" ) sys.stdout.flush() @contextmanager def snake_case_ ( ): try: hide_cursor() yield finally: show_cursor()
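The try/finally in the context manager above guarantees the cursor is restored even if the wrapped block raises. A self-contained sketch of the same pattern (POSIX ANSI escape codes only; the name `hidden_cursor` is illustrative):

import sys
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    # ANSI escape codes: ?25l hides the terminal cursor, ?25h shows it again.
    sys.stdout.write("\033[?25l")
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()

# Usage: the cursor stays hidden for the duration of the block,
# even if the body raises.
# with hidden_cursor():
#     render_progress_bar()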
655
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class _A ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Dict=7 , lowerCamelCase : Tuple=3 , lowerCamelCase : Dict=18 , lowerCamelCase : Any=30 , lowerCamelCase : List[Any]=400 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=True , lowerCamelCase : List[str]=None , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]=[0.4814_5466, 0.457_8275, 0.4082_1073] , lowerCamelCase : List[Any]=[0.2686_2954, 0.2613_0258, 0.2757_7711] , lowerCamelCase : str=True , ): '''simple docstring''' __lowercase = size if size is not None else {"height": 224, "width": 224} __lowercase = crop_size if crop_size is not None else {"height": 18, "width": 18} __lowercase = parent __lowercase = batch_size __lowercase = num_channels __lowercase = image_size __lowercase = min_resolution __lowercase = max_resolution __lowercase = do_resize __lowercase = size __lowercase = do_center_crop __lowercase = crop_size __lowercase = do_normalize __lowercase = image_mean __lowercase = image_std __lowercase = do_convert_rgb def _snake_case ( self : Tuple ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def _snake_case ( self : int , lowerCamelCase : int=False , lowerCamelCase : Dict=False , lowerCamelCase : Any=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __lowercase = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __lowercase = [] for i in range(self.batch_size ): __lowercase , __lowercase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __lowercase = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs] if torchify: __lowercase = [torch.from_numpy(lowerCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : int = ChineseCLIPImageProcessor if is_vision_available() else None def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = ChineseCLIPImageProcessingTester(self , do_center_crop=lowerCamelCase ) @property def _snake_case ( self : List[Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase , "size" ) ) 
self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) ) self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 224, "width": 224} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) __lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def _snake_case ( self : List[str] ): '''simple docstring''' pass def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input __lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input __lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input __lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], 
self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : str = ChineseCLIPImageProcessor if is_vision_available() else None def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowerCamelCase ) __lowercase = 3 @property def _snake_case ( self : Any ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase , "size" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) ) self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) ) def _snake_case ( self : Dict ): '''simple docstring''' pass def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input __lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
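Both test classes assert the same output contract: pixel_values of shape (batch, channels, crop_height, crop_width). The equivalent call from user code might look like this sketch (assuming transformers with torch and vision extras installed; the toy image is a placeholder):

import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))  # dummy RGB input
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, crop_height, crop_width)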
716
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : List[str] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[Any] = """yolos""" def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = qkv_bias __lowercase = num_detection_tokens __lowercase = use_mid_position_embeddings __lowercase = auxiliary_loss # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = eos_coefficient class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = version.parse("""1.11""" ) @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : str ): '''simple docstring''' return 1e-4 @property def _snake_case ( self : Tuple ): '''simple docstring''' return 12
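Configs like this one are plain containers: instantiating with no arguments yields the defaults from `__init__`, and any field can be overridden by keyword. A short sketch, assuming transformers is installed and exposes `YolosConfig`:

from transformers import YolosConfig

config = YolosConfig()                          # defaults: hidden_size=768, image_size=[512, 864], ...
config = YolosConfig(num_detection_tokens=50)   # override any field by keyword
print(config.num_detection_tokens)              # 50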
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE ): if length <= 0 or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError("Length must be a positive integer." ) return [n * (2 * n - 1) for n in range(_SCREAMING_SNAKE_CASE )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
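The comprehension implements the closed form h(n) = n(2n - 1), so length 5 yields the first five hexagonal numbers starting from n = 0:

# h(n) = n * (2n - 1) -> 0, 1, 6, 15, 28, 45, ...
assert [n * (2 * n - 1) for n in range(5)] == [0, 1, 6, 15, 28]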
717
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Optional[int] = logging.get_logger(__name__) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError("Quantized models are not supported." ) __lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE ) if matches: __lowercase = float(matches[1] ) __lowercase = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __lowercase = 1_0_0_1 __lowercase = "imagenet-1k-id2label.json" __lowercase = "huggingface/label-files" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()} __lowercase = "background" __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} return config def snake_case_ ( ): __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE ) # Load 🤗 model __lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __lowercase = MobileNetVaImageProcessor( crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , ) __lowercase = image_processor(images=prepare_img() , return_tensors="pt" ) __lowercase = model(**_SCREAMING_SNAKE_CASE ) __lowercase = outputs.logits assert logits.shape == (1, 1_0_0_1) if model_name == "mobilenet_v1_1.0_224": __lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": __lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: __lowercase = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("Pushing to the hub..." ) __lowercase = "google/" + model_name image_processor.push_to_hub(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""mobilenet_v1_1.0_224""", type=str, help="""Name of the MobileNetV1 model you'd like to convert. 
Should be in the form 'mobilenet_v1_<depth>_<size>'.""", ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ : Dict = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
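With the four arguments registered above, a typical invocation would look like the following (the script filename and all paths are placeholders, shown here as comments):

# python convert_mobilenet_v1.py \
#     --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#     --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf \
#     --push_to_hub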
655
0
import math def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if 0 not in (x, y): # We use the relation log10(x^y) = y * log10(x), where 10 is the base. return y * math.logaa(_SCREAMING_SNAKE_CASE ) else: if x == 0: # 0 raised to any positive power is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("This should never happen" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. snake_case__ : Optional[int] = "Enter the base and the power separated by a comma: " snake_case__ : str = map(int, input(prompt).split(""",""")) snake_case__ : Dict = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. snake_case__ : Optional[int] = res(xa, ya) snake_case__ : List[str] = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
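As a concrete check of the log trick: comparing 2^100 with 10^30 reduces to comparing 100 * log10(2) ≈ 30.103 against 30 * log10(10) = 30, so 2^100 is larger without ever forming either power:

import math

assert 100 * math.log10(2) > 30 * math.log10(10)  # 30.102... > 30.0, hence 2**100 > 10**30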
718
from __future__ import annotations from typing import Any class _A : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def _snake_case ( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCamelCase ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(lowerCamelCase ) component_size[u_node] += component_size[v_node] self.set_component(lowerCamelCase ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase ) print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def snake_case_ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
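The union step above merges by component size, always attaching the smaller component to the larger one. A minimal standalone illustration of that union-by-size rule, operating on component roots:

def union_by_size(parent, size, a, b):
    # a and b are assumed to be component roots; attach the smaller under the larger.
    if size[a] < size[b]:
        a, b = b, a
    parent[b] = a
    size[a] += size[b]

parent, size = list(range(4)), [1, 1, 1, 1]
union_by_size(parent, size, 0, 1)
union_by_size(parent, size, 2, 3)
union_by_size(parent, size, 0, 2)
assert parent == [0, 0, 0, 2] and size[0] == 4  # all four nodes now share root 0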
655
0
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class _A : '''simple docstring''' _snake_case : Optional[int] = 42 _snake_case : Tuple = 42 class _A : '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowercase = [[] for _ in range(lowerCamelCase )] __lowercase = size def __getitem__( self : Optional[Any] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' return iter(self._graph[vertex] ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return self._size def _snake_case ( self : int , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : Any ): '''simple docstring''' if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1." ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size)." ) self._graph[from_vertex].append(Edge(lowerCamelCase , lowerCamelCase ) ) def _snake_case ( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : str ): '''simple docstring''' __lowercase = deque([start_vertex] ) __lowercase = [None] * self.size __lowercase = 0 while queue: __lowercase = queue.popleft() __lowercase = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: __lowercase = current_distance + edge.weight __lowercase = distances[edge.destination_vertex] if ( isinstance(lowerCamelCase , lowerCamelCase ) and new_distance >= dest_vertex_distance ): continue __lowercase = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex." ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
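The deque discipline is what makes this 0-1 BFS run in O(V + E): relaxing a 0-weight edge pushes the vertex to the front of the deque (same distance layer), a 1-weight edge to the back. A compact standalone version of the same idea:

from collections import deque

def zero_one_bfs(adj, start):
    # adj[u] = list of (v, w) pairs with w in {0, 1}
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[v] is None or dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                # 0-edges keep the vertex in the current layer, 1-edges defer it.
                (dq.appendleft if w == 0 else dq.append)(v)
    return dist

assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 0)], []], 0) == [0, 0, 0]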
719
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : List[str] = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
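The `_LazyModule` indirection defers importing the torch-heavy submodules until an exported name is first touched. The same effect can be sketched with PEP 562's module-level `__getattr__` (illustrative only, not the actual `_LazyModule` implementation):

# sketch of a lazy package __init__.py
import importlib

_LAZY = {"MgpstrProcessor": ".processing_mgp_str", "MgpstrTokenizer": ".tokenization_mgp_str"}

def __getattr__(name):
    # Resolve the submodule on first attribute access, then delegate.
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")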
655
0
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : '''simple docstring''' @staticmethod def _snake_case ( *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class _A ( unittest.TestCase ): '''simple docstring''' _snake_case : Union[str, Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING def _snake_case ( self : str , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase = ObjectDetectionPipeline(model=lowerCamelCase , image_processor=lowerCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _snake_case ( self : List[str] , lowerCamelCase : int , lowerCamelCase : Optional[Any] ): '''simple docstring''' __lowercase = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(lowerCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase , { "score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase ), "box": {"xmin": ANY(lowerCamelCase ), "ymin": ANY(lowerCamelCase ), "xmax": ANY(lowerCamelCase ), "ymax": ANY(lowerCamelCase )}, } , ) import datasets __lowercase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) __lowercase = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] __lowercase = object_detector(lowerCamelCase , threshold=0.0 ) self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(lowerCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase , { "score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase ), "box": {"xmin": ANY(lowerCamelCase ), "ymin": ANY(lowerCamelCase ), "xmax": ANY(lowerCamelCase ), "ymax": ANY(lowerCamelCase )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def _snake_case ( self : Dict ): '''simple docstring''' pass @require_torch def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = "hf-internal-testing/tiny-detr-mobilenetsv3" __lowercase = AutoModelForObjectDetection.from_pretrained(lowerCamelCase ) __lowercase = AutoFeatureExtractor.from_pretrained(lowerCamelCase ) __lowercase = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) __lowercase = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( 
nested_simplify(lowerCamelCase , decimals=4 ) , [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = "facebook/detr-resnet-50" __lowercase = AutoModelForObjectDetection.from_pretrained(lowerCamelCase ) __lowercase = AutoFeatureExtractor.from_pretrained(lowerCamelCase ) __lowercase = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) __lowercase = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = "facebook/detr-resnet-50" __lowercase = pipeline("object-detection" , model=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) __lowercase = object_detector( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = 0.9985 __lowercase = "facebook/detr-resnet-50" __lowercase = pipeline("object-detection" , model=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowerCamelCase ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = "Narsil/layoutlmv3-finetuned-funsd" __lowercase = 0.9993 __lowercase = pipeline("object-detection" , model=lowerCamelCase , threshold=lowerCamelCase ) __lowercase = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
720
from __future__ import annotations import bisect def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = 0 __lowercase = len(_SCREAMING_SNAKE_CASE ) - 1 while left <= right: __lowercase = left + (right - left) // 2 __lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __lowercase = midpoint - 1 else: __lowercase = midpoint + 1 return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item: return index return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if right < left: return None __lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 ) else: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip() snake_case__ : Any = sorted(int(item) for item in user_input.split(""",""")) snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n""")) snake_case__ : List[Any] = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
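The hand-rolled helpers mirror the standard library exactly: `bisect_left` returns the first index whose element is not less than the item, `bisect_right` the first index whose element is greater. A quick check against `bisect` itself:

import bisect

data = [1, 2, 4, 4, 8]
assert bisect.bisect_left(data, 4) == 2
assert bisect.bisect_right(data, 4) == 4
# membership test in the style of the helper above
i = bisect.bisect_left(data, 4)
assert i != len(data) and data[i] == 4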
655
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : Optional[int] = StableDiffusionInstructPixaPixPipeline _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} _snake_case : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _snake_case : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS _snake_case : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self : str ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) __lowercase = PNDMScheduler(skip_prk_steps=lowerCamelCase ) torch.manual_seed(0 ) __lowercase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __lowercase = CLIPTextModel(lowerCamelCase ) __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __lowercase = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _snake_case ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int]=0 ): '''simple docstring''' __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) __lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowercase = Image.fromarray(np.uinta(lowerCamelCase ) ).convert("RGB" ) if str(lowerCamelCase ).startswith("mps" ): __lowercase = torch.manual_seed(lowerCamelCase ) else: __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __lowercase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def _snake_case ( self : int ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase ) 
        __lowercase = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        __lowercase = self.get_dummy_inputs(lowerCamelCase )
        __lowercase = sd_pipe(**lowerCamelCase ).images
        __lowercase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowercase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self : Any ):
        '''simple docstring'''
        __lowercase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __lowercase = self.get_dummy_components()
        __lowercase = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
        __lowercase = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        __lowercase = self.get_dummy_inputs(lowerCamelCase )
        __lowercase = "french fries"
        __lowercase = sd_pipe(**lowerCamelCase , negative_prompt=lowerCamelCase )
        __lowercase = output.images
        __lowercase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowercase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        __lowercase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __lowercase = self.get_dummy_components()
        __lowercase = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
        __lowercase = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        __lowercase = self.get_dummy_inputs(lowerCamelCase )
        __lowercase = [inputs["prompt"]] * 2
        __lowercase = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
        __lowercase = torch.from_numpy(lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
        __lowercase = image / 2 + 0.5
        __lowercase = image.permute(0 , 3 , 1 , 2 )
        __lowercase = image.repeat(2 , 1 , 1 , 1 )
        __lowercase = sd_pipe(**lowerCamelCase ).images
        __lowercase = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        __lowercase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self : Optional[int] ):
        '''simple docstring'''
        __lowercase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __lowercase = self.get_dummy_components()
        __lowercase = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
        __lowercase = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
        __lowercase = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        __lowercase = self.get_dummy_inputs(lowerCamelCase )
        __lowercase = sd_pipe(**lowerCamelCase ).images
        __lowercase = image[0, -3:, -3:, -1]
        __lowercase = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in __lowercase] ) )
        assert image.shape == (1, 32, 32, 3)
        __lowercase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self : int ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def _snake_case ( self : Any ):
        '''simple docstring'''
        __lowercase = self.get_dummy_components()
        __lowercase = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
        __lowercase = VaeImageProcessor(do_resize=lowerCamelCase , do_normalize=lowerCamelCase )
        __lowercase = pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )

        __lowercase = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase , input_image_type="pt" ) )[0]

        __lowercase = components["vae"]
        __lowercase = self.get_dummy_inputs_by_type(lowerCamelCase , input_image_type="pt" )

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                __lowercase = vae.encode(inputs[image_param] ).latent_dist.mode()

        __lowercase = pipe(**lowerCamelCase )[0]

        __lowercase = np.abs(out - out_latents_inputs ).max()
        self.assertLess(lowerCamelCase , 1e-4 , "passing latents as image input generate different result from passing image" )


@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    '''simple docstring'''

    def _snake_case ( self : Optional[int] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self : Any , lowerCamelCase : List[Any]=0 ):
        '''simple docstring'''
        __lowercase = torch.manual_seed(lowerCamelCase )
        __lowercase = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
        __lowercase = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        __lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()

        __lowercase = self.get_inputs()
        __lowercase = pipe(**lowerCamelCase ).images
        __lowercase = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        __lowercase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def _snake_case ( self : List[str] ):
        '''simple docstring'''
        __lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase )
        __lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()

        __lowercase = self.get_inputs()
        __lowercase = pipe(**lowerCamelCase ).images
        __lowercase = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        __lowercase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def _snake_case ( self : List[Any] ):
        '''simple docstring'''
        __lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase )
        __lowercase = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()

        __lowercase = self.get_inputs()
        __lowercase = pipe(**lowerCamelCase ).images
        __lowercase = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        __lowercase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def _snake_case ( self : List[Any] ):
        '''simple docstring'''
        __lowercase = 0

        def callback_fn(lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : torch.FloatTensor ) -> None:
            __lowercase = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                __lowercase = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                __lowercase = latents[0, -3:, -3:, -1]
                __lowercase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                __lowercase = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                __lowercase = latents[0, -3:, -3:, -1]
                __lowercase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        __lowercase = False
        __lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase , torch_dtype=torch.floataa )
        __lowercase = pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()

        __lowercase = self.get_inputs()
        pipe(**lowerCamelCase , callback=lowerCamelCase , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        __lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase , torch_dtype=torch.floataa )
        __lowercase = pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        __lowercase = self.get_inputs()
        __lowercase = pipe(**lowerCamelCase )

        __lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def _snake_case ( self : int ):
        '''simple docstring'''
        __lowercase = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        __lowercase = inputs["image"].resize((504, 504) )

        __lowercase = "timbrooks/instruct-pix2pix"
        __lowercase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            lowerCamelCase , safety_checker=lowerCamelCase , )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()

        __lowercase = pipe(**lowerCamelCase )
        __lowercase = output.images[0]

        __lowercase = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        __lowercase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
721
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


snake_case__ : int = logging.get_logger(__name__)

snake_case__ : Optional[int] = {
    """microsoft/conditional-detr-resnet-50""": (
        """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
    ),
}


class _A ( _lowercase ):
    '''simple docstring'''

    _snake_case : Dict = """conditional_detr"""
    _snake_case : Union[str, Any] = ["""past_key_values"""]
    _snake_case : Optional[int] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                __lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(lowerCamelCase , lowerCamelCase ):
                __lowercase = backbone_config.get("model_type" )
                __lowercase = CONFIG_MAPPING[backbone_model_type]
                __lowercase = config_class.from_dict(lowerCamelCase )
        __lowercase = use_timm_backbone
        __lowercase = backbone_config
        __lowercase = num_channels
        __lowercase = num_queries
        __lowercase = d_model
        __lowercase = encoder_ffn_dim
        __lowercase = encoder_layers
        __lowercase = encoder_attention_heads
        __lowercase = decoder_ffn_dim
        __lowercase = decoder_layers
        __lowercase = decoder_attention_heads
        __lowercase = dropout
        __lowercase = attention_dropout
        __lowercase = activation_dropout
        __lowercase = activation_function
        __lowercase = init_std
        __lowercase = init_xavier_std
        __lowercase = encoder_layerdrop
        __lowercase = decoder_layerdrop
        __lowercase = encoder_layers
        __lowercase = auxiliary_loss
        __lowercase = position_embedding_type
        __lowercase = backbone
        __lowercase = use_pretrained_backbone
        __lowercase = dilation
        # Hungarian matcher
        __lowercase = class_cost
        __lowercase = bbox_cost
        __lowercase = giou_cost
        # Loss coefficients
        __lowercase = mask_loss_coefficient
        __lowercase = dice_loss_coefficient
        __lowercase = cls_loss_coefficient
        __lowercase = bbox_loss_coefficient
        __lowercase = giou_loss_coefficient
        __lowercase = focal_alpha
        super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )

    @property
    def _snake_case ( self : Tuple ):
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def _snake_case ( self : str ):
        '''simple docstring'''
        return self.d_model

    def _snake_case ( self : int ):
        '''simple docstring'''
        __lowercase = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            __lowercase = self.backbone_config.to_dict()
        __lowercase = self.__class__.model_type
        return output


class _A ( _lowercase ):
    '''simple docstring'''

    _snake_case : Any = version.parse("""1.11""" )

    @property
    def _snake_case ( self : Tuple ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )

    @property
    def _snake_case ( self : Any ):
        '''simple docstring'''
        return 1e-5

    @property
    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        return 12
655
0
'''simple docstring'''

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


snake_case__ : List[str] = """Create a default config file for Accelerate with only a few flags set."""


def snake_case_ ( _SCREAMING_SNAKE_CASE="no" , _SCREAMING_SNAKE_CASE = default_json_config_file , _SCREAMING_SNAKE_CASE = False ):
    __lowercase = Path(_SCREAMING_SNAKE_CASE )
    path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
    if path.exists():
        print(
            F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    __lowercase = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )

    __lowercase = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        __lowercase = torch.cuda.device_count()
        __lowercase = num_gpus
        __lowercase = False
        if num_gpus > 1:
            __lowercase = "MULTI_GPU"
        else:
            __lowercase = "NO"
    elif is_xpu_available() and use_xpu:
        __lowercase = torch.xpu.device_count()
        __lowercase = num_xpus
        __lowercase = False
        if num_xpus > 1:
            __lowercase = "MULTI_XPU"
        else:
            __lowercase = "NO"
    elif is_npu_available():
        __lowercase = torch.npu.device_count()
        __lowercase = num_npus
        __lowercase = False
        if num_npus > 1:
            __lowercase = "MULTI_NPU"
        else:
            __lowercase = "NO"
    else:
        __lowercase = 0
        __lowercase = True
        __lowercase = 1
        __lowercase = "NO"
    __lowercase = ClusterConfig(**_SCREAMING_SNAKE_CASE )
    config.to_json_file(_SCREAMING_SNAKE_CASE )
    return path


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = parser.add_parser("default" , parents=_SCREAMING_SNAKE_CASE , help=_SCREAMING_SNAKE_CASE , formatter_class=_SCREAMING_SNAKE_CASE )
    parser.add_argument(
        "--config_file" ,
        default=_SCREAMING_SNAKE_CASE ,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) ,
        dest="save_location" ,
    )
    parser.add_argument(
        "--mixed_precision" ,
        choices=["no", "fp16", "bf16"] ,
        type=_SCREAMING_SNAKE_CASE ,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." ,
        default="no" ,
    )
    parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
    return parser


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F"""accelerate configuration saved at {config_file}""" )
700
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


snake_case__ : Any = logging.get_logger(__name__)


class _A ( _lowercase , _lowercase ):
    '''simple docstring'''

    _snake_case : Dict = """maskformer-swin"""
    _snake_case : List[str] = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ):
        '''simple docstring'''
        super().__init__(**lowerCamelCase )
        __lowercase = image_size
        __lowercase = patch_size
        __lowercase = num_channels
        __lowercase = embed_dim
        __lowercase = depths
        __lowercase = len(lowerCamelCase )
        __lowercase = num_heads
        __lowercase = window_size
        __lowercase = mlp_ratio
        __lowercase = qkv_bias
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = drop_path_rate
        __lowercase = hidden_act
        __lowercase = use_absolute_embeddings
        __lowercase = layer_norm_eps
        __lowercase = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        __lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
        __lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
        __lowercase , __lowercase = get_aligned_output_features_output_indices(
            out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
655
0
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class _A ( _lowercase ):
    '''simple docstring'''

    def __init__( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : int=13 , lowerCamelCase : Any=7 , lowerCamelCase : Dict=True , lowerCamelCase : Tuple=True , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[int]=False , lowerCamelCase : Tuple=False , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=2 , lowerCamelCase : Optional[Any]=99 , lowerCamelCase : Tuple=0 , lowerCamelCase : Optional[int]=32 , lowerCamelCase : Tuple=5 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Any=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : List[Any]=512 , lowerCamelCase : Any=12 , lowerCamelCase : Dict=2 , lowerCamelCase : str=0.02 , lowerCamelCase : Tuple=3 , lowerCamelCase : str=4 , lowerCamelCase : Optional[Any]="last" , lowerCamelCase : int=None , lowerCamelCase : List[Any]=None , ):
        '''simple docstring'''
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_lengths
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = gelu_activation
        __lowercase = sinusoidal_embeddings
        __lowercase = causal
        __lowercase = asm
        __lowercase = n_langs
        __lowercase = vocab_size
        __lowercase = n_special
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = type_sequence_label_size
        __lowercase = initializer_range
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = summary_type
        __lowercase = use_proj
        __lowercase = scope

    def _snake_case ( self : Optional[int] ):
        '''simple docstring'''
        __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowercase = random_attention_mask([self.batch_size, self.seq_length] )

        __lowercase = None
        if self.use_input_lengths:
            __lowercase = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        __lowercase = None
        if self.use_token_type_ids:
            __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowercase = ids_tensor([self.batch_size] , 2 ).float()
            __lowercase = ids_tensor([self.batch_size] , self.num_choices )

        __lowercase = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def _snake_case ( self : int ):
        '''simple docstring'''
        return FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )

    def _snake_case ( self : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[Any] , ):
        '''simple docstring'''
        __lowercase = FlaubertModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __lowercase = model(lowerCamelCase , lengths=lowerCamelCase , langs=lowerCamelCase )
        __lowercase = model(lowerCamelCase , langs=lowerCamelCase )
        __lowercase = model(lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _snake_case ( self : Any , lowerCamelCase : Dict , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , ):
        '''simple docstring'''
        __lowercase = FlaubertWithLMHeadModel(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __lowercase = model(lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _snake_case ( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Any , ):
        '''simple docstring'''
        __lowercase = FlaubertForQuestionAnsweringSimple(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __lowercase = model(lowerCamelCase )
        __lowercase = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Tuple , ):
        '''simple docstring'''
        __lowercase = FlaubertForQuestionAnswering(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __lowercase = model(lowerCamelCase )
        __lowercase = model(
            lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , p_mask=lowerCamelCase , )
        __lowercase = model(
            lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , )
        ((__lowercase ) , ) = result_with_labels.to_tuple()
        __lowercase = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
        ((__lowercase ) , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    def _snake_case ( self : List[str] , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , ):
        '''simple docstring'''
        __lowercase = FlaubertForSequenceClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __lowercase = model(lowerCamelCase )
        __lowercase = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _snake_case ( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , ):
        '''simple docstring'''
        __lowercase = self.num_labels
        __lowercase = FlaubertForTokenClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __lowercase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _snake_case ( self : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , ):
        '''simple docstring'''
        __lowercase = self.num_choices
        __lowercase = FlaubertForMultipleChoice(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowercase = model(
            lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _snake_case ( self : List[str] ):
        '''simple docstring'''
        __lowercase = self.prepare_config_and_inputs()
        ((__lowercase ) , (__lowercase ) , (__lowercase ) , (__lowercase ) , (__lowercase ) , (__lowercase ) , (__lowercase ) , (__lowercase ) , (__lowercase ) , ) = config_and_inputs
        __lowercase = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class _A ( _lowercase , _lowercase , unittest.TestCase ):
    '''simple docstring'''

    _snake_case : List[str] = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    _snake_case : Any = (
        {
            """feature-extraction""": FlaubertModel,
            """fill-mask""": FlaubertWithLMHeadModel,
            """question-answering""": FlaubertForQuestionAnsweringSimple,
            """text-classification""": FlaubertForSequenceClassification,
            """token-classification""": FlaubertForTokenClassification,
            """zero-shot""": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def _snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : Dict ):
        '''simple docstring'''
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _snake_case ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]=False ):
        '''simple docstring'''
        __lowercase = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                __lowercase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
                __lowercase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
        return inputs_dict

    def _snake_case ( self : List[str] ):
        '''simple docstring'''
        __lowercase = FlaubertModelTester(self )
        __lowercase = ConfigTester(self , config_class=lowerCamelCase , emb_dim=37 )

    def _snake_case ( self : Any ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _snake_case ( self : str ):
        '''simple docstring'''
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*lowerCamelCase )

    def _snake_case ( self : List[Any] ):
        '''simple docstring'''
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase )

    def _snake_case ( self : List[Any] ):
        '''simple docstring'''
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*lowerCamelCase )

    def _snake_case ( self : Optional[int] ):
        '''simple docstring'''
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase )

    def _snake_case ( self : int ):
        '''simple docstring'''
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase )

    def _snake_case ( self : int ):
        '''simple docstring'''
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*lowerCamelCase )

    def _snake_case ( self : Any ):
        '''simple docstring'''
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCamelCase )

    @slow
    def _snake_case ( self : int ):
        '''simple docstring'''
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = FlaubertModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )

    @slow
    @require_torch_gpu
    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            __lowercase = True
            __lowercase = model_class(config=lowerCamelCase )
            __lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )

            __lowercase = torch.jit.trace(
                lowerCamelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCamelCase , os.path.join(lowerCamelCase , "traced_model.pt" ) )
                __lowercase = torch.jit.load(os.path.join(lowerCamelCase , "traced_model.pt" ) , map_location=lowerCamelCase )
                loaded(inputs_dict["input_ids"].to(lowerCamelCase ) , inputs_dict["attention_mask"].to(lowerCamelCase ) )


@require_torch
class _A ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        __lowercase = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        __lowercase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        with torch.no_grad():
            __lowercase = model(lowerCamelCase )[0]
        __lowercase = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , lowerCamelCase )
        __lowercase = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1e-4 ) )
701
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive" )

    # get the generated string sequence
    __lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
    #
    # convert them to integers
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        __lowercase = int(sequence[i] , 2 )

    return sequence


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    __lowercase = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    __lowercase = gray_code_sequence_string(bit_count - 1 )

    __lowercase = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        __lowercase = "0" + smaller_sequence[i]
        sequence.append(_SCREAMING_SNAKE_CASE )

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        __lowercase = "1" + smaller_sequence[i]
        sequence.append(_SCREAMING_SNAKE_CASE )

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
655
0
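The gray-code generator in the style_context cell above builds the n-bit sequence by prefixing "0" to the (n-1)-bit sequence and "1" to its reverse. A minimal sketch of the same reflect-and-prefix idea with readable names (the function name and direct integer encoding here are illustrative, not taken from the dataset):

def gray_code(bit_count: int) -> list[int]:
    # Reflect-and-prefix construction: G(n) = 0·G(n-1) followed by 1·reversed(G(n-1)).
    if bit_count == 0:
        return [0]
    smaller = gray_code(bit_count - 1)
    # Prefixing "0" keeps the value; prefixing "1" adds 2**(bit_count - 1).
    return smaller + [(1 << (bit_count - 1)) | value for value in reversed(smaller)]


if __name__ == "__main__":
    print(gray_code(2))  # [0, 1, 3, 2] -> binary 00, 01, 11, 10; neighbors differ by one bit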
from collections import defaultdict
from math import ceil, sqrt


def snake_case_ ( _SCREAMING_SNAKE_CASE = 1_0_0_0_0_0_0 , _SCREAMING_SNAKE_CASE = 1_0 ):
    __lowercase = defaultdict(_SCREAMING_SNAKE_CASE )

    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            __lowercase = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            __lowercase = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(_SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 1_0 )


if __name__ == "__main__":
    print(F'''{solution() = }''')
702
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
    model.train()
    __lowercase = model(_SCREAMING_SNAKE_CASE )
    __lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(_SCREAMING_SNAKE_CASE )


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
    set_seed(4_2 )
    __lowercase = RegressionModel()
    __lowercase = deepcopy(_SCREAMING_SNAKE_CASE )
    __lowercase = RegressionDataset(length=8_0 )
    __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
    model.to(accelerator.device )
    if sched:
        __lowercase = AdamW(params=model.parameters() , lr=1E-3 )
        __lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
        __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
    # Make a copy of `model`
    if sched:
        __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    else:
        __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    # Test when on a single CPU or GPU that the context manager does nothing
    __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
    # Use a single batch
    __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
        __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
                step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        else:
            # Sync grads
            step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad
            ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
        __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    # Test on distributed setup that context manager behaves properly
    __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
    # Use a single batch
    __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
        __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
                step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        else:
            # Sync grads
            step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
        __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]


def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
    __lowercase = Accelerator(
        split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
    for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
        __lowercase , __lowercase = batch.values()
        # Gather the distributed inputs and targs for the base model
        __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
        __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
            step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
        __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
    GradientState._reset_state()


def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
    __lowercase = Accelerator(
        split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
        __lowercase , __lowercase = batch.values()
        # Gather the distributed inputs and targs for the base model
        __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
        __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
            step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        __lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
        if accelerator.num_processes > 1:
            check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
    GradientState._reset_state()


def snake_case_ ( ):
    __lowercase = Accelerator()
    __lowercase = RegressionDataset(length=8_0 )
    __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
    __lowercase = RegressionDataset(length=9_6 )
    __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
    __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
        if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
                    if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def snake_case_ ( ):
    __lowercase = Accelerator()
    __lowercase = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(_SCREAMING_SNAKE_CASE )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(_SCREAMING_SNAKE_CASE )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " ,
                        F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,
                    )
                test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " ,
                "`split_batches=False`, `dispatch_batches=False`**" ,
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " ,
                        F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,
                    )
                test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
655
0
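The accelerate test file above exercises `Accelerator.no_sync` and `Accelerator.accumulate`. For orientation, here is a stripped-down sketch of the accumulation pattern those tests verify; the model, optimizer, and data are placeholder objects chosen for illustration and are not part of the dataset:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Inside `accumulate`, gradient synchronization is skipped until the
    # accumulation boundary -- exactly what the tests above assert on `param.grad`.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()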
from __future__ import annotations

from typing import Any


class _A ( _lowercase ):
    '''simple docstring'''

    pass


class _A :
    '''simple docstring'''

    def __init__( self : Optional[Any] , lowerCamelCase : Any ):
        '''simple docstring'''
        __lowercase = data
        __lowercase = None

    def __iter__( self : Optional[int] ):
        '''simple docstring'''
        __lowercase = self
        __lowercase = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(lowerCamelCase )
            yield node.data
            __lowercase = node.next_node

    @property
    def _snake_case ( self : List[str] ):
        '''simple docstring'''
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    snake_case__ : Any = Node(1)
    snake_case__ : Dict = Node(2)
    snake_case__ : Any = Node(3)
    snake_case__ : Any = Node(4)
    print(root_node.has_loop)  # False
    snake_case__ : Optional[Any] = root_node.next_node
    print(root_node.has_loop)  # True

    snake_case__ : List[Any] = Node(5)
    snake_case__ : Optional[int] = Node(6)
    snake_case__ : Union[str, Any] = Node(5)
    snake_case__ : List[str] = Node(6)
    print(root_node.has_loop)  # False

    snake_case__ : List[str] = Node(1)
    print(root_node.has_loop)  # False
703
from ....utils import logging


snake_case__ : List[Any] = logging.get_logger(__name__)


class _A ( _lowercase ):
    '''simple docstring'''

    def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
        '''simple docstring'''
        __lowercase = config.__dict__
        __lowercase = modal_hidden_size
        if num_labels:
            __lowercase = num_labels
655
0
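The loop detector in the linked-list cell above records visited nodes in a list, which costs O(n^2) time and O(n) extra memory. A constant-memory alternative is Floyd's tortoise-and-hare cycle detection; a minimal sketch assuming the same `next_node` attribute shape as the cell above (the function name is illustrative, not from the dataset):

def has_loop(head) -> bool:
    # Floyd's cycle detection: the fast pointer advances two nodes per step;
    # if a cycle exists, the two pointers must eventually meet inside it.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False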
import json
import sys


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
    with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
        __lowercase = json.load(_SCREAMING_SNAKE_CASE )

    __lowercase = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ):
        __lowercase = results[benchmark_name]
        __lowercase = benchmark_name.split("/" )[-1]
        output_md.append(F"""### Benchmark: {benchmark_file_name}""" )

        __lowercase = "| metric |"
        __lowercase = "|--------|"
        __lowercase = "| new / old (diff) |"
        for metric_name in sorted(_SCREAMING_SNAKE_CASE ):
            __lowercase = benchmark_res[metric_name]
            __lowercase = metric_vals["new"]
            __lowercase = metric_vals.get("old" , _SCREAMING_SNAKE_CASE )
            __lowercase = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE )

            __lowercase = F""" {new_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
            if old_val is not None:
                val_str += F""" / {old_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F""" ({dif_val:f})""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>" )

    with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
        f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) )


if __name__ == "__main__":
    snake_case__ : List[str] = sys.argv[1]
    snake_case__ : Optional[Any] = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
704
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    '''simple docstring'''

    _snake_case : Dict = StableUnCLIPImgaImgPipeline
    _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _snake_case : int = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _snake_case : int = frozenset([] )

    def _snake_case ( self : Tuple ):
        '''simple docstring'''
        __lowercase = 32
        __lowercase = embedder_hidden_size

        # image encoding components
        __lowercase = CLIPImageProcessor(crop_size=32 , size=32 )

        torch.manual_seed(0 )
        __lowercase = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )

        # regular denoising components
        torch.manual_seed(0 )
        __lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        __lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )

        torch.manual_seed(0 )
        __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        torch.manual_seed(0 )
        __lowercase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )

        torch.manual_seed(0 )
        __lowercase = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )

        torch.manual_seed(0 )
        __lowercase = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )

        torch.manual_seed(0 )
        __lowercase = AutoencoderKL()

        __lowercase = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ):
        '''simple docstring'''
        if str(lowerCamelCase ).startswith("mps" ):
            __lowercase = torch.manual_seed(lowerCamelCase )
        else:
            __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )

        __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )

        if pil_image:
            __lowercase = input_image * 0.5 + 0.5
            __lowercase = input_image.clamp(0 , 1 )
            __lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            __lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __lowercase = self.get_dummy_components()
        __lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        __lowercase = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )

        __lowercase = self.get_dummy_inputs(lowerCamelCase )
        inputs.update({"image_embeds": None} )
        __lowercase = sd_pipe(**lowerCamelCase ).images
        __lowercase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)

        __lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self : Dict ):
        '''simple docstring'''
        __lowercase = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )

    def _snake_case ( self : str ):
        '''simple docstring'''
        __lowercase = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,
        reason="XFormers attention is only available with CUDA and `xformers` installed" ,
    )
    def _snake_case ( self : str ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )


@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    '''simple docstring'''

    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self : Any ):
        '''simple docstring'''
        __lowercase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        __lowercase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
        __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )

        __lowercase = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )

    def _snake_case ( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        __lowercase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
        __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )

        __lowercase = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )

    def _snake_case ( self : str ):
        '''simple docstring'''
        __lowercase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        __lowercase = pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = pipe(
            lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )

        __lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    return 0.5 * mass * abs(_SCREAMING_SNAKE_CASE ) * abs(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
705
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class _A ( _lowercase , _lowercase ):
    '''simple docstring'''

    @register_to_config
    def __init__( self : Optional[Any] , *, lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ):
        '''simple docstring'''
        super().__init__()

        __lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) )

        # parameters for additional clip time embeddings
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )

        # parameters for encoder hidden states
        __lowercase = clip_extra_context_tokens
        __lowercase = nn.Linear(
            lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
        __lowercase = nn.LayerNorm(lowerCamelCase )

    def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            __lowercase = image_embeddings.shape[0]
            __lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            __lowercase = classifier_free_guidance_embeddings.expand(
                lowerCamelCase , -1 )
            __lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        __lowercase = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        __lowercase = self.embedding_proj(lowerCamelCase )
        __lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase )
        __lowercase = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        __lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase )
        __lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens )
        __lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 )

        __lowercase = self.encoder_hidden_states_proj(lowerCamelCase )
        __lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase )
        __lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )

        return text_encoder_hidden_states, additive_clip_time_embeddings
655
0
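A de-obfuscated sketch of the kinetic-energy helper above, with a doctest added so the module's `doctest.testmod(verbose=True)` call has something to verify (the name `kinetic_energy` is assumed):

def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Kinetic energy KE = 0.5 * m * v**2; note abs(v) * abs(v) equals v**2.

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(2, -5)
    25.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)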
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING snake_case__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(_lowercase ) class _A ( _lowercase ): '''simple docstring''' def __init__( self : Tuple , *lowerCamelCase : Dict , **lowerCamelCase : Dict ): '''simple docstring''' super().__init__(*lowerCamelCase , **lowerCamelCase ) requires_backends(self , "decord" ) self.check_model_type(lowerCamelCase ) def _snake_case ( self : int , lowerCamelCase : List[str]=None , lowerCamelCase : Any=None , lowerCamelCase : Any=None ): '''simple docstring''' __lowercase = {} if frame_sampling_rate is not None: __lowercase = frame_sampling_rate if num_frames is not None: __lowercase = num_frames __lowercase = {} if top_k is not None: __lowercase = top_k return preprocess_params, {}, postprocess_params def __call__( self : List[str] , lowerCamelCase : Union[str, List[str]] , **lowerCamelCase : Any ): '''simple docstring''' return super().__call__(lowerCamelCase , **lowerCamelCase ) def _snake_case ( self : Any , lowerCamelCase : int , lowerCamelCase : Any=None , lowerCamelCase : Tuple=1 ): '''simple docstring''' if num_frames is None: __lowercase = self.model.config.num_frames if video.startswith("http://" ) or video.startswith("https://" ): __lowercase = BytesIO(requests.get(lowerCamelCase ).content ) __lowercase = VideoReader(lowerCamelCase ) videoreader.seek(0 ) __lowercase = 0 __lowercase = num_frames * frame_sampling_rate - 1 __lowercase = np.linspace(lowerCamelCase , lowerCamelCase , num=lowerCamelCase , dtype=np.intaa ) __lowercase = videoreader.get_batch(lowerCamelCase ).asnumpy() __lowercase = list(lowerCamelCase ) __lowercase = self.image_processor(lowerCamelCase , return_tensors=self.framework ) return model_inputs def _snake_case ( self : int , lowerCamelCase : str ): '''simple docstring''' __lowercase = self.model(**lowerCamelCase ) return model_outputs def _snake_case ( self : int , lowerCamelCase : Any , lowerCamelCase : Any=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: __lowercase = self.model.config.num_labels if self.framework == "pt": __lowercase = model_outputs.logits.softmax(-1 )[0] __lowercase , __lowercase = probs.topk(lowerCamelCase ) else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __lowercase = scores.tolist() __lowercase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
706
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar snake_case__ : Union[str, Any] = TypeVar("""T""") snake_case__ : Optional[int] = TypeVar("""U""") class _A ( Generic[T, U] ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ): '''simple docstring''' __lowercase = key __lowercase = val __lowercase = None __lowercase = None def __repr__( self : Any ): '''simple docstring''' return ( f"""Node: key: {self.key}, val: {self.val}, """ f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}""" ) class _A ( Generic[T, U] ): '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) __lowercase , __lowercase = self.rear, self.head def __repr__( self : Optional[Any] ): '''simple docstring''' __lowercase = ["DoubleLinkedList"] __lowercase = self.head while node.next is not None: rep.append(str(lowerCamelCase ) ) __lowercase = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' __lowercase = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None __lowercase = node __lowercase = previous __lowercase = node __lowercase = self.rear def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' if node.prev is None or node.next is None: return None __lowercase = node.next __lowercase = node.prev __lowercase = None __lowercase = None return node class _A ( Generic[T, U] ): '''simple docstring''' _snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = DoubleLinkedList() __lowercase = capacity __lowercase = 0 __lowercase = 0 __lowercase = 0 __lowercase = {} def __repr__( self : Optional[Any] ): '''simple docstring''' return ( f"""CacheInfo(hits={self.hits}, misses={self.miss}, """ f"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self : Dict , lowerCamelCase : T ): '''simple docstring''' return key in self.cache def _snake_case ( self : List[Any] , lowerCamelCase : T ): '''simple docstring''' if key in self.cache: self.hits += 1 __lowercase = self.cache[key] __lowercase = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowerCamelCase ) return node.val self.miss += 1 return None def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity __lowercase = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowerCamelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value __lowercase = 
self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list __lowercase = value self.list.add(lowerCamelCase ) @classmethod def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ): '''simple docstring''' def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*lowerCamelCase : T ) -> U: if func not in cls.decorator_function_to_instance_map: __lowercase = LRUCache(lowerCamelCase ) __lowercase = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: __lowercase = func(*lowerCamelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
655
0
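A hedged usage sketch for the LRU cache above: the obfuscated classmethod near the end is a decorator factory (named `decorator` in the un-obfuscated algorithm collection this sample mirrors) that memoizes a one-argument function and attaches a `cache_info()` helper:

# Assumes the class name LRUCache and the classmethod name `decorator`.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed with memoized recursion
print(fib.cache_info())  # hits / misses / capacity / current size summary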
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : Any ): '''simple docstring''' __lowercase = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) __lowercase = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __lowercase = model(lowerCamelCase )["last_hidden_state"] __lowercase = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , lowerCamelCase ) # compare the actual values for a slice. __lowercase = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
707
import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) snake_case__ : Optional[Any] = logging.getLogger() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = "\n".join(_SCREAMING_SNAKE_CASE ) Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random""" snake_case__ : int = """sshleifer/bart-tiny-random""" snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart""" snake_case__ : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _A ( _lowercase ): '''simple docstring''' def _snake_case ( self : str , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(lowerCamelCase , lowerCamelCase ) __lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split() with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): run_generate() assert Path(lowerCamelCase ).exists() # os.remove(Path(output_file_name)) def _snake_case ( self : Dict ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } __lowercase = Path(self.get_auto_remove_tmp_dir() ) __lowercase = str(tmp_dir / "scores.json" ) __lowercase = str(tmp_dir / "val.target" ) _dump_articles(lowerCamelCase , text["en"] ) _dump_articles(lowerCamelCase , text["de"] ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {str(lowerCamelCase )} {str(lowerCamelCase )} --score_path {score_path} --reference_path {reference_path} --task {task} """.split() testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] ) with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): with CaptureStdout() as cs: run_search() __lowercase = [" num_beams | length_penalty", model, "Best score args"] __lowercase = ["Info"] if "translation" in task: expected_strings.append("bleu" ) else: 
expected_strings.extend(lowerCamelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowerCamelCase ).exists() os.remove(Path(lowerCamelCase ) )
655
0
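The Camembert test above follows a common integration-test pattern: assert the output shape, then compare a tiny slice of the activations against hard-coded reference values within an absolute tolerance. A self-contained sketch of that pattern with stand-in tensors:

import numpy as np

output = np.random.rand(1, 10, 768).astype(np.float32)  # stand-in for model activations
expected_slice = output[0, :3, :3] + 5e-5               # stand-in reference, within tolerance

assert output.shape == (1, 10, 768)
assert np.allclose(output[0, :3, :3], expected_slice, atol=1e-4)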
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters snake_case__ : Optional[Any] = logging.get_logger(__name__) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ): # Recurse if needed if "." in tensor_name: __lowercase = tensor_name.split("." ) for split in splits[:-1]: __lowercase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) __lowercase = new_module __lowercase = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) __lowercase = tensor_name in module._buffers __lowercase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) __lowercase = False __lowercase = False if is_buffer or not is_bitsandbytes_available(): __lowercase = False __lowercase = False else: __lowercase = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) __lowercase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: __lowercase = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: __lowercase = old_value.to(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): __lowercase = value.to("cpu" ) if value.dtype == torch.inta: __lowercase = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse( "0.37.2" ) if not is_abit_serializable: raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) else: __lowercase = torch.tensor(_SCREAMING_SNAKE_CASE , device="cpu" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , _SCREAMING_SNAKE_CASE ) and fpaa_statistics is None: __lowercase = new_value.T __lowercase = old_value.__dict__ if is_abit: __lowercase = bnb.nn.IntaParams(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) elif is_abit: __lowercase = bnb.nn.Paramsabit(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) __lowercase = new_value if fpaa_statistics is not None: setattr(module.weight , "SCB" , fpaa_statistics.to(_SCREAMING_SNAKE_CASE ) ) else: if value is None: __lowercase = old_value.to(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): __lowercase = value.to(_SCREAMING_SNAKE_CASE ) else: __lowercase = torch.tensor(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) if is_buffer: __lowercase = new_value else: __lowercase = nn.Parameter(_SCREAMING_SNAKE_CASE , requires_grad=old_value.requires_grad ) __lowercase = new_value def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ): for name, module in model.named_children(): if current_key_name is None: __lowercase = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if (isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) or isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in ".".join(_SCREAMING_SNAKE_CASE ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = module.weight.shape else: __lowercase = module.in_features __lowercase = module.out_features if quantization_config.quantization_method() == "llm_int8": __lowercase = bnb.nn.LinearabitLt( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) __lowercase = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: __lowercase = bnb.nn.Linearabit( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) __lowercase = True # Store the module class in case we need to transpose the weight later __lowercase = type(_SCREAMING_SNAKE_CASE ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(_SCREAMING_SNAKE_CASE ) if len(list(module.children() ) ) > 0: __lowercase , __lowercase = _replace_with_bnb_linear( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_been_replaced=_SCREAMING_SNAKE_CASE , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ): __lowercase = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert __lowercase , __lowercase = _replace_with_bnb_linear( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( "You are 
loading your model in 8bit or 4bit but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def snake_case_ ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): warnings.warn( "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _SCREAMING_SNAKE_CASE , ) return replace_with_bnb_linear(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def snake_case_ ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): warnings.warn( "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _SCREAMING_SNAKE_CASE , ) return set_module_quantized_tensor_to_device(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() __lowercase = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __lowercase = sum(_SCREAMING_SNAKE_CASE , [] ) __lowercase = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model __lowercase = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __lowercase = list(model.named_children() ) __lowercase = [list_modules[-1][0]] # add last module together with tied weights __lowercase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) __lowercase = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys __lowercase = [".weight", ".bias"] __lowercase = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __lowercase = name.replace(_SCREAMING_SNAKE_CASE , "" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names
708
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _A : '''simple docstring''' _snake_case : int _snake_case : TreeNode | None = None _snake_case : TreeNode | None = None snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""") def snake_case_ ( _SCREAMING_SNAKE_CASE ): if root is None: return 0 # Validation def count_nodes(_SCREAMING_SNAKE_CASE ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_SCREAMING_SNAKE_CASE ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) __lowercase , __lowercase = get_distrib(node.left ) __lowercase , __lowercase = get_distrib(node.right ) __lowercase = 1 - left_distrib_excess __lowercase = 1 - right_distrib_excess __lowercase = ( left_distrib_moves + right_distrib_moves + abs(_SCREAMING_SNAKE_CASE ) + abs(_SCREAMING_SNAKE_CASE ) ) __lowercase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return get_distrib(_SCREAMING_SNAKE_CASE )[0] if __name__ == "__main__": import doctest doctest.testmod()
655
0
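The bitsandbytes helpers above are normally driven through transformers' public quantization entry point rather than called directly; a hedged sketch (illustrative checkpoint id; requires `bitsandbytes` and a CUDA device):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# llm_int8 path: eligible nn.Linear modules are swapped for bnb.nn.Linear8bitLt.
bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # illustrative checkpoint
    quantization_config=bnb_config,
    device_map="auto",
)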
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = 0 def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : List[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowercase = Path(lowerCamelCase ) / "preprocessor_config.json" __lowercase = Path(lowerCamelCase ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) ) __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : Tuple ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowercase = Path(lowerCamelCase ) / "preprocessor_config.json" __lowercase = Path(lowerCamelCase ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) ) __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : Tuple ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowercase = CLIPConfig() # Create a dummy config file with image_processor_type __lowercase = Path(lowerCamelCase ) / "preprocessor_config.json" __lowercase = Path(lowerCamelCase ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase ).to_dict() config_dict.pop("image_processor_type" ) __lowercase = CLIPImageProcessor(**lowerCamelCase ) # save in new folder model_config.save_pretrained(lowerCamelCase ) config.save_pretrained(lowerCamelCase ) __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase ) # make sure private variable is not incorrectly saved __lowercase = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowercase = Path(lowerCamelCase ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , ) __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase , lowerCamelCase ) def _snake_case ( self :
Dict ): '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase , "clip-base is not a local folder and is not a valid model identifier" ): __lowercase = AutoImageProcessor.from_pretrained("clip-base" ) def _snake_case ( self : List[Any] ): '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase , revision="aaaaaa" ) def _snake_case ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): __lowercase = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' with self.assertRaises(lowerCamelCase ): __lowercase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase ): __lowercase = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase ) __lowercase = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(lowerCamelCase ) __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def _snake_case ( self : List[Any] ): '''simple docstring''' try: AutoConfig.register("custom" , lowerCamelCase ) AutoImageProcessor.register(lowerCamelCase , lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase ): AutoImageProcessor.register(lowerCamelCase , lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: __lowercase = Path(lowerCamelCase ) / "preprocessor_config.json" __lowercase = Path(lowerCamelCase ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) ) __lowercase = CustomImageProcessor.from_pretrained(lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(lowerCamelCase ) __lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase , lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _snake_case ( self : Optional[int] ): '''simple docstring''' class _A ( _lowercase ): '''simple docstring''' _snake_case : Union[str, Any] = True try: AutoConfig.register("custom" , lowerCamelCase ) AutoImageProcessor.register(lowerCamelCase , lowerCamelCase ) # If remote code is not set, the default is to use local __lowercase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) 
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __lowercase = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __lowercase = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(lowerCamelCase , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
709
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = SwinvaConfig() __lowercase = swinva_name.split("_" ) __lowercase = name_split[1] if "to" in name_split[3]: __lowercase = int(name_split[3][-3:] ) else: __lowercase = int(name_split[3] ) if "to" in name_split[2]: __lowercase = int(name_split[2][-2:] ) else: __lowercase = int(name_split[2][6:] ) if model_size == "tiny": __lowercase = 9_6 __lowercase = (2, 2, 6, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "small": __lowercase = 9_6 __lowercase = (2, 2, 1_8, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "base": __lowercase = 1_2_8 __lowercase = (2, 2, 1_8, 2) __lowercase = (4, 8, 1_6, 3_2) else: __lowercase = 1_9_2 __lowercase = (2, 2, 1_8, 2) __lowercase = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: __lowercase = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __lowercase = 2_1_8_4_1 __lowercase = "huggingface/label-files" __lowercase = "imagenet-22k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} else: __lowercase = 1_0_0_0 __lowercase = "huggingface/label-files" __lowercase = "imagenet-1k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} __lowercase = img_size __lowercase = num_classes __lowercase = embed_dim __lowercase = depths __lowercase = num_heads __lowercase = window_size return config def snake_case_ ( _SCREAMING_SNAKE_CASE ): if "patch_embed.proj" in name: __lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowercase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowercase = "encoder." + name if "attn.proj" in name: __lowercase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __lowercase = name.replace("attn" , "attention.self" ) if "norm1" in name: __lowercase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __lowercase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __lowercase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __lowercase = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: __lowercase = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: __lowercase = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: __lowercase = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: __lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": __lowercase = "layernorm.weight" if name == "norm.bias": __lowercase = "layernorm.bias" if "head" in name: __lowercase = name.replace("head" , "classifier" ) else: __lowercase = "swinv2." 
+ name return name def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for key in orig_state_dict.copy().keys(): __lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: __lowercase = key.split("." ) __lowercase = int(key_split[1] ) __lowercase = int(key_split[3] ) __lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowercase = val[:dim, :] __lowercase = val[dim : dim * 2, :] __lowercase = val[-dim:, :] else: __lowercase = val[:dim] __lowercase = val[ dim : dim * 2 ] __lowercase = val[-dim:] else: __lowercase = val return orig_state_dict def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() __lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE ) __lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() __lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) __lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) __lowercase = timm_model(inputs["pixel_values"] ) __lowercase = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub( repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case__ : str = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
655
0
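A hedged sketch of the register-then-resolve round trip the image-processor tests above exercise, using the public AutoConfig/AutoImageProcessor registration hooks (the two custom classes are illustrative stand-ins for the test modules):

from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class CustomConfig(PretrainedConfig):
    model_type = "custom"

class CustomImageProcessor(BaseImageProcessor):
    pass

# Once registered, the auto classes resolve the custom pair like any built-in one.
AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)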
'''simple docstring''' import math import random def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ): if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value snake_case__ : Optional[Any] = 0.0_2 def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = float(2 * (random.randint(1 , 1_0_0 )) - 1 ) for _ in range(_SCREAMING_SNAKE_CASE ): # Forward propagation __lowercase = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __lowercase = (expected / 1_0_0) - layer_a # Error delta __lowercase = layer_1_error * sigmoid_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_0_0 if __name__ == "__main__": import doctest doctest.testmod() snake_case__ : Union[str, Any] = int(input("""Expected value: """)) snake_case__ : Optional[int] = int(input("""Number of propagations: """)) print(forward_propagation(expected, number_propagations))
710
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : List[str] = logging.get_logger(__name__) snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } snake_case__ : List[str] = { """allenai/led-base-16384""": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def snake_case_ ( ): __lowercase = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowercase = bs[:] __lowercase = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 __lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase = char return pairs class _A ( _lowercase ): '''simple docstring''' _snake_case : List[str] = VOCAB_FILES_NAMES _snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle: __lowercase = json.load(lowerCamelCase ) __lowercase = {v: k for k, v in self.encoder.items()} __lowercase = errors # how to handle errors in decoding __lowercase = bytes_to_unicode() __lowercase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase , encoding="utf-8" ) as merges_handle: __lowercase = merges_handle.read().split("\n" )[1:-1] __lowercase = [tuple(merge.split() ) for merge in bpe_merges] __lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) __lowercase = {} __lowercase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _snake_case ( self : Optional[int] ): '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[int] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , lowerCamelCase : str ): '''simple docstring''' if token in self.cache: return self.cache[token] __lowercase = tuple(lowerCamelCase ) __lowercase = get_pairs(lowerCamelCase ) if not pairs: return token while True: __lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowercase , __lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(lowerCamelCase ): try: __lowercase = word.index(lowerCamelCase , lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase = j if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(lowerCamelCase ) __lowercase = new_word if len(lowerCamelCase ) == 1: break else: __lowercase = get_pairs(lowerCamelCase ) __lowercase = " ".join(lowerCamelCase ) __lowercase = word return word def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ): '''simple docstring''' __lowercase = [] for token in re.findall(self.pat , lowerCamelCase ): __lowercase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : str , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.decoder.get(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = "".join(lowerCamelCase ) __lowercase = bytearray([self.byte_decoder[c] for c in text] 
).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" ) __lowercase = 0 with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowercase = token_index writer.write(" ".join(lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ): '''simple docstring''' __lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()): __lowercase = " " + text return (text, kwargs) def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' __lowercase = super()._pad( encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: __lowercase = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __lowercase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` 
need to have the same length as other (sequential) inputs. __lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase ) if needs_to_be_padded: __lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __lowercase = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __lowercase = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
655
0
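A de-obfuscated mini-demo of the BPE pair-extraction helper defined alongside the tokenizer above (named `get_pairs` in its un-obfuscated counterpart); it returns the set of adjacent symbol pairs that the merge loop ranks:

def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(sorted(get_pairs(("l", "o", "w", "e", "r"))))
# [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]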
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): def get_masked_lm_array(_SCREAMING_SNAKE_CASE ): __lowercase = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if "kernel" in name: __lowercase = array.transpose() return torch.from_numpy(_SCREAMING_SNAKE_CASE ) def get_encoder_array(_SCREAMING_SNAKE_CASE ): __lowercase = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if "kernel" in name: __lowercase = array.transpose() return torch.from_numpy(_SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if "kernel" in name: __lowercase = array.transpose() return torch.from_numpy(_SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = array.reshape(_SCREAMING_SNAKE_CASE ) if "kernel" in name: __lowercase = array.transpose() return torch.from_numpy(_SCREAMING_SNAKE_CASE ) print(f"""Loading model based on config from {config_path}...""" ) __lowercase = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) __lowercase = BertForMaskedLM(_SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __lowercase = model.bert.encoder.layer[layer_index] # Self-attention __lowercase = layer.attention.self __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_query_dense/kernel" , self_attn.query.weight.data.shape ) __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_query_dense/bias" , self_attn.query.bias.data.shape ) __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_key_dense/kernel" , self_attn.key.weight.data.shape ) __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_key_dense/bias" , self_attn.key.bias.data.shape ) __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_value_dense/kernel" , self_attn.value.weight.data.shape ) __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_value_dense/bias" , self_attn.value.bias.data.shape ) # Self-attention Output __lowercase = layer.attention.output __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_output_dense/kernel" , self_output.dense.weight.data.shape ) __lowercase = get_encoder_attention_layer_array( _SCREAMING_SNAKE_CASE , "_output_dense/bias" , self_output.dense.bias.data.shape ) __lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_attention_layer_norm/gamma" ) __lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_attention_layer_norm/beta" ) # Intermediate __lowercase = layer.intermediate __lowercase = 
get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_intermediate_dense/kernel" ) __lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_intermediate_dense/bias" ) # Output __lowercase = layer.output __lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_dense/kernel" ) __lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_dense/bias" ) __lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_layer_norm/gamma" ) __lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_layer_norm/beta" ) # Embeddings __lowercase = get_encoder_array("_position_embedding_layer/embeddings" ) __lowercase = get_encoder_array("_type_embedding_layer/embeddings" ) __lowercase = get_encoder_array("_embedding_norm_layer/gamma" ) __lowercase = get_encoder_array("_embedding_norm_layer/beta" ) # LM Head __lowercase = model.cls.predictions.transform __lowercase = get_masked_lm_array("dense/kernel" ) __lowercase = get_masked_lm_array("dense/bias" ) __lowercase = get_masked_lm_array("layer_norm/gamma" ) __lowercase = get_masked_lm_array("layer_norm/beta" ) __lowercase = get_masked_lm_array("embedding_table" ) # Pooling __lowercase = BertPooler(config=_SCREAMING_SNAKE_CASE ) __lowercase = get_encoder_array("_pooler_layer/kernel" ) __lowercase = get_encoder_array("_pooler_layer/bias" ) # Export final model model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) __lowercase = BertForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print("Model conversion was done successfully!" ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) snake_case__ : Any = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
711
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError("The length of profit and weight must be the same." ) if max_weight <= 0: raise ValueError("max_weight must be greater than zero." ) if any(p < 0 for p in profit ): raise ValueError("Profit can not be negative." ) if any(w < 0 for w in weight ): raise ValueError("Weight can not be negative." ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. __lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] # Creating a copy of the list and sorting profit/weight in ascending order __lowercase = sorted(_SCREAMING_SNAKE_CASE ) # declaring useful variables __lowercase = len(_SCREAMING_SNAKE_CASE ) __lowercase = 0 __lowercase = 0 __lowercase = 0 # loop till the total weight does not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight __lowercase = sorted_profit_by_weight[length - i - 1] __lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE ) __lowercase = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( """Input profits, weights, and then max_weight (all positive ints) separated by """ """spaces.""" ) snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()] snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()] snake_case__ : Optional[Any] = int(input("""Max weight allowed: """)) # Function Call calc_profit(profit, weight, max_weight)
655
0
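A quick check of the greedy fractional-knapsack routine above (`calc_profit` is the name its `__main__` block uses):

profit = [1, 2, 3]
weight = [3, 4, 5]

# All three items fit inside the limit, so the full profit 1 + 2 + 3 is collected.
print(calc_profit(profit, weight, 15))  # 6

# With a tighter limit, items with ratios 0.6 and 0.5 fit whole (gain 5, weight 9),
# then only 1/3 of the remaining worst-ratio item is taken.
print(calc_profit(profit, weight, 10))  # 5.333...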
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""") # TF training parameters snake_case__ : Dict = False snake_case__ : Dict = False def snake_case_ ( _SCREAMING_SNAKE_CASE ): return TrainCommand(_SCREAMING_SNAKE_CASE ) class _A ( _lowercase ): '''simple docstring''' @staticmethod def _snake_case ( lowerCamelCase : ArgumentParser ): '''simple docstring''' __lowercase = parser.add_parser("train" , help="CLI tool to train a model on a task." ) train_parser.add_argument( "--train_data" , type=lowerCamelCase , required=lowerCamelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , ) train_parser.add_argument( "--column_label" , type=lowerCamelCase , default=0 , help="Column of the dataset csv file with example labels." ) train_parser.add_argument( "--column_text" , type=lowerCamelCase , default=1 , help="Column of the dataset csv file with example texts." ) train_parser.add_argument( "--column_id" , type=lowerCamelCase , default=2 , help="Column of the dataset csv file with example ids." ) train_parser.add_argument( "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." ) train_parser.add_argument("--validation_data" , type=lowerCamelCase , default="" , help="path to validation dataset." ) train_parser.add_argument( "--validation_split" , type=lowerCamelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , ) train_parser.add_argument("--output" , type=lowerCamelCase , default="./" , help="path to saved the trained model." ) train_parser.add_argument( "--task" , type=lowerCamelCase , default="text_classification" , help="Task to train the model on." ) train_parser.add_argument( "--model" , type=lowerCamelCase , default="bert-base-uncased" , help="Model's name or path to stored model." ) train_parser.add_argument("--train_batch_size" , type=lowerCamelCase , default=32 , help="Batch size for training." ) train_parser.add_argument("--valid_batch_size" , type=lowerCamelCase , default=64 , help="Batch size for validation." ) train_parser.add_argument("--learning_rate" , type=lowerCamelCase , default=3e-5 , help="Learning rate." ) train_parser.add_argument("--adam_epsilon" , type=lowerCamelCase , default=1e-08 , help="Epsilon for Adam optimizer." 
) train_parser.set_defaults(func=lowerCamelCase ) def __init__( self : int , lowerCamelCase : Namespace ): '''simple docstring''' __lowercase = logging.get_logger("transformers-cli/training" ) __lowercase = "tf" if is_tf_available() else "torch" os.makedirs(args.output , exist_ok=lowerCamelCase ) __lowercase = args.output __lowercase = args.column_label __lowercase = args.column_text __lowercase = args.column_id self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" ) if args.task == "text_classification": __lowercase = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f"""Loading dataset from {args.train_data}""" ) __lowercase = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) __lowercase = None if args.validation_data: self.logger.info(f"""Loading validation dataset from {args.validation_data}""" ) __lowercase = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) __lowercase = args.validation_split __lowercase = args.train_batch_size __lowercase = args.valid_batch_size __lowercase = args.learning_rate __lowercase = args.adam_epsilon def _snake_case ( self : Union[str, Any] ): '''simple docstring''' if self.framework == "tf": return self.run_tf() return self.run_torch() def _snake_case ( self : Union[str, Any] ): '''simple docstring''' raise NotImplementedError def _snake_case ( self : int ): '''simple docstring''' self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
712
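# The TrainCommand above registers itself on a shared ArgumentParser through a
# subparser plus set_defaults(func=...). A framework-free sketch of that
# registration pattern (names and defaults here are illustrative, not transformers'):
from argparse import ArgumentParser

def register_train_subcommand(parser: ArgumentParser) -> None:
    sub = parser.add_subparsers()
    train = sub.add_parser("train", help="Train a model on a task.")
    train.add_argument("--train_data", type=str, required=True)
    train.add_argument("--train_batch_size", type=int, default=32)
    # The subcommand stashes a callable; the CLI entry point later runs args.func(args).
    train.set_defaults(func=lambda args: print(f"would train on {args.train_data}"))

cli = ArgumentParser("demo-cli")
register_train_subcommand(cli)
args = cli.parse_args(["train", "--train_data", "data.csv"])
args.func(args)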
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = """openai/whisper-base""" _snake_case : Union[str, Any] = ( """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """ """transcribed text.""" ) _snake_case : Any = """transcriber""" _snake_case : Any = WhisperProcessor _snake_case : Optional[int] = WhisperForConditionalGeneration _snake_case : str = ["""audio"""] _snake_case : Optional[int] = ["""text"""] def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features def _snake_case ( self : str , lowerCamelCase : List[Any] ): '''simple docstring''' return self.model.generate(inputs=lowerCamelCase ) def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
655
0
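# The tool above wraps Whisper in three steps: processor -> generate -> decode.
# Used directly, the same flow looks roughly like this (a sketch; the zero
# array stands in for one second of real 16 kHz mono audio):
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)
features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
generated_ids = model.generate(features)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])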
from ....configuration_utils import PretrainedConfig from ....utils import logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : int = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class _A ( _lowercase ): '''simple docstring''' _snake_case : str = """van""" def __init__( self : Optional[Any] , lowerCamelCase : Tuple=224 , lowerCamelCase : str=3 , lowerCamelCase : int=[7, 3, 3, 3] , lowerCamelCase : str=[4, 2, 2, 2] , lowerCamelCase : List[Any]=[64, 128, 320, 512] , lowerCamelCase : int=[3, 3, 12, 3] , lowerCamelCase : str=[8, 8, 4, 4] , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : int=1e-6 , lowerCamelCase : int=1e-2 , lowerCamelCase : Any=0.0 , lowerCamelCase : Optional[int]=0.0 , **lowerCamelCase : int , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = image_size __lowercase = num_channels __lowercase = patch_sizes __lowercase = strides __lowercase = hidden_sizes __lowercase = depths __lowercase = mlp_ratios __lowercase = hidden_act __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = layer_scale_init_value __lowercase = drop_path_rate __lowercase = dropout_rate
713
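# The VAN config above follows the usual PretrainedConfig pattern: every
# constructor argument becomes an attribute that round-trips through
# config.json. A toy sketch of that pattern (ToyConfig is mine, not part of
# transformers):
from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"

    def __init__(self, hidden_size=64, depths=(3, 3, 12, 3), **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.depths = list(depths)

cfg = ToyConfig(hidden_size=128)
cfg.save_pretrained("./toy-config")  # writes ./toy-config/config.json
reloaded = ToyConfig.from_pretrained("./toy-config")
assert reloaded.hidden_size == 128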
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _A : '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, 
"image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : str ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["prompt"] __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] if "image" in inputs: __lowercase = inputs["image"] else: __lowercase = None if "mask_image" in inputs: __lowercase = inputs["mask_image"] else: __lowercase = None if "original_image" in inputs: __lowercase = inputs["original_image"] else: __lowercase = None __lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase ) # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - 
to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 )
655
0
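# The dummy components above call torch.manual_seed(0) immediately before
# building each module. That is what makes the randomly initialized weights
# reproducible from run to run; the effect in isolation:
import torch

torch.manual_seed(0)
first = torch.nn.Linear(4, 4)
torch.manual_seed(0)
second = torch.nn.Linear(4, 4)
assert torch.equal(first.weight, second.weight)  # identical random init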
import os import jsonlines import numpy as np from tqdm import tqdm snake_case__ : Dict = 20_48 snake_case__ : str = 40_96 snake_case__ : List[str] = 42 snake_case__ : Dict = os.environ.pop("""PROCESS_TRAIN""", """false""") snake_case__ : Optional[Any] = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4} def snake_case_ ( _SCREAMING_SNAKE_CASE ): def choose_first(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) == 1: __lowercase = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: __lowercase = {k: [a[k]] for k in a} if len(a["start_token"] ) > 0: break return a __lowercase = {"id": example["id"]} __lowercase = example["annotations"] __lowercase = annotation["yes_no_answer"] if 0 in yes_no_answer or 1 in yes_no_answer: __lowercase = ["yes"] if 1 in yes_no_answer else ["no"] __lowercase = __lowercase = [] __lowercase = __lowercase = [] __lowercase = ["<cls>"] else: __lowercase = ["short"] __lowercase = choose_first(annotation["short_answers"] ) if len(out["start_token"] ) == 0: # answer will be long if short is not available __lowercase = ["long"] __lowercase = choose_first(annotation["long_answer"] , is_long_answer=_SCREAMING_SNAKE_CASE ) __lowercase = [] answer.update(_SCREAMING_SNAKE_CASE ) # disregard some samples if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]: __lowercase = True else: __lowercase = False __lowercase = ["start_token", "end_token", "start_byte", "end_byte", "text"] if not all(isinstance(answer[k] , _SCREAMING_SNAKE_CASE ) for k in cols ): raise ValueError("Issue in ID" , example["id"] ) return answer def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowercase = _get_single_answer(_SCREAMING_SNAKE_CASE ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element __lowercase = example["document"]["tokens"] __lowercase = [] for i in range(len(doc["token"] ) ): if not doc["is_html"][i]: context.append(doc["token"][i] ) return { "context": " ".join(_SCREAMING_SNAKE_CASE ), "answer": { "start_token": -1_0_0, # ignore index in cross-entropy "end_token": -1_0_0, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples __lowercase = ["start_token", "end_token"] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 __lowercase = example["document"]["tokens"] __lowercase = answer["start_token"] __lowercase = answer["end_token"] __lowercase = [] for i in range(len(doc["token"] ) ): if not doc["is_html"][i]: context.append(doc["token"][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 __lowercase = " ".join(context[start_token:end_token] ) # checking above code if assertion: __lowercase = doc["is_html"][answer["start_token"] : answer["end_token"]] __lowercase = doc["token"][answer["start_token"] : answer["end_token"]] __lowercase = " ".join([old[i] for i in range(len(_SCREAMING_SNAKE_CASE ) ) if not is_html[i]] ) if new != old: print("ID:" , example["id"] ) print("New:" , _SCREAMING_SNAKE_CASE , end="\n" ) print("Old:" , _SCREAMING_SNAKE_CASE , end="\n\n" ) return { "context": " ".join(_SCREAMING_SNAKE_CASE ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2_0_4_8 , _SCREAMING_SNAKE_CASE=4_0_9_6 , _SCREAMING_SNAKE_CASE=True ): # overlap will be of doc_stride - q_len __lowercase = get_context_and_ans(_SCREAMING_SNAKE_CASE , assertion=_SCREAMING_SNAKE_CASE ) __lowercase = out["answer"] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } __lowercase = tokenizer(example["question"]["text"] , out["context"] ).input_ids __lowercase = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element __lowercase = [] __lowercase = [] __lowercase = input_ids[:q_len] __lowercase = range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) , max_length - doc_stride ) for i in doc_start_indices: __lowercase = i + max_length - q_len __lowercase = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer["category"][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_0_0] * len(_SCREAMING_SNAKE_CASE ), "end_token": [-1_0_0] * len(_SCREAMING_SNAKE_CASE ), "category": category, }, } __lowercase = out["context"].split() __lowercase = splitted_context[answer["end_token"]] __lowercase = len( tokenizer( " ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=_SCREAMING_SNAKE_CASE , ).input_ids ) __lowercase = len( tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token __lowercase = len(tokenizer(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 __lowercase = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive __lowercase = answer["start_token"] __lowercase = answer["end_token"] if assertion: __lowercase = tokenizer.decode(_SCREAMING_SNAKE_CASE ) if answer["span"] != new: print("ISSUE IN TOKENIZATION" ) print("OLD:" , answer["span"] ) print("NEW:" , _SCREAMING_SNAKE_CASE , end="\n\n" ) if len(_SCREAMING_SNAKE_CASE ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": 
[answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } __lowercase = input_ids[:q_len] __lowercase = range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) , max_length - doc_stride ) __lowercase = [] __lowercase = [] __lowercase = [] __lowercase = [] # null, yes, no, long, short for i in doc_start_indices: __lowercase = i + max_length - q_len __lowercase = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: __lowercase = start_token - i + q_len __lowercase = end_token - i + q_len answers_category.append(answer["category"][0] ) # ["short"] -> "short" else: __lowercase = -1_0_0 __lowercase = -1_0_0 answers_category.append("null" ) __lowercase = inputs[-1][start_token : end_token + 1] answers_start_token.append(_SCREAMING_SNAKE_CASE ) answers_end_token.append(_SCREAMING_SNAKE_CASE ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print("ISSUE in strided for ID:" , example["id"] ) print("New:" , tokenizer.decode(_SCREAMING_SNAKE_CASE ) ) print("Old:" , tokenizer.decode(_SCREAMING_SNAKE_CASE ) , end="\n\n" ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2_0_4_8 , _SCREAMING_SNAKE_CASE=4_0_9_6 , _SCREAMING_SNAKE_CASE=False ): __lowercase = get_strided_contexts_and_ans( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , doc_stride=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , assertion=_SCREAMING_SNAKE_CASE , ) return example def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): with jsonlines.open(_SCREAMING_SNAKE_CASE , "a" ) as writer: for example in tqdm(_SCREAMING_SNAKE_CASE , total=len(_SCREAMING_SNAKE_CASE ) , desc="Saving samples ... " ): __lowercase = example["labels"] for ids, start, end, cat in zip( example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { "input_ids": ids, "start_token": start, "end_token": end, "category": CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer snake_case__ : Union[str, Any] = load_dataset("""natural_questions""") snake_case__ : Union[str, Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""") snake_case__ : List[Any] = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""] snake_case__ : List[str] = { """tokenizer""": tokenizer, """doc_stride""": DOC_STRIDE, """max_length""": MAX_LENGTH, """assertion""": False, } snake_case__ : str = data.map(prepare_inputs, fn_kwargs=fn_kwargs) snake_case__ : Dict = data.remove_columns(["""annotations""", """document""", """id""", """question"""]) print(data) np.random.seed(SEED) snake_case__ : str = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl""" save_to_disk(data, file_name=cache_file_name)
714
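# The preprocessing above slices long inputs into overlapping windows with
# range(q_len, len(input_ids), max_length - doc_stride), prepending the
# question tokens to every slice. A sketch of that striding logic on toy
# numbers (all values illustrative):
def windows(ids: list[int], q_len: int, max_length: int, doc_stride: int) -> list[list[int]]:
    question = ids[:q_len]
    return [question + ids[start : start + max_length - q_len]
            for start in range(q_len, len(ids), max_length - doc_stride)]

chunks = windows(list(range(20)), q_len=4, max_length=10, doc_stride=6)
# step = max_length - doc_stride = 4, so consecutive context slices overlap by 2
assert chunks[0] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]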
import numpy as np snake_case__ : Tuple = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class _A : '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' __lowercase = np.array(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase , __lowercase = np.where(letter == self.SQUARE ) __lowercase = np.concatenate([indexa + 1, indexa + 1] ) return indexes def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' __lowercase = self.SQUARE[indexa - 1, indexa - 1] return letter def _snake_case ( self : int , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() __lowercase = message.replace(" " , "" ) __lowercase = message.replace("j" , "i" ) __lowercase = np.empty((2, len(lowerCamelCase )) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape(2 * len(lowerCamelCase ) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[numbers_index * 2] ) __lowercase = int(second_step[(numbers_index * 2) + 1] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = encoded_message + letter return encoded_message def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() __lowercase = message.replace(" " , "" ) __lowercase = np.empty(2 * len(lowerCamelCase ) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape((2, len(lowerCamelCase )) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[0, numbers_index] ) __lowercase = int(second_step[1, numbers_index] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = decoded_message + letter return decoded_message
655
0
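# The cipher class above maps each letter to its (row, column) in a 5x5
# square ("j" is folded into "i") and then runs a transposition over the
# collected coordinates. A sketch of just the grid-lookup half, using plain
# dicts instead of numpy (the transposition pass is not reproduced here):
SQUARE_LETTERS = "abcdefghiklmnopqrstuvwxyz"  # 25 letters, no "j"
LETTER_TO_PAIR = {ch: divmod(i, 5) for i, ch in enumerate(SQUARE_LETTERS)}

def letter_coordinates(message: str) -> list[tuple[int, int]]:
    cleaned = message.lower().replace(" ", "").replace("j", "i")
    # 1-based (row, column) pairs, matching the class's "+ 1" convention
    return [(row + 1, col + 1) for row, col in (LETTER_TO_PAIR[ch] for ch in cleaned)]

assert letter_coordinates("test") == [(4, 4), (1, 5), (4, 3), (4, 4)]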
from math import sqrt def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' must be an int and positive" __lowercase = True # 0 and 1 are not primes. if number <= 1: __lowercase = False for divisor in range(2 , int(round(sqrt(_SCREAMING_SNAKE_CASE ) ) ) + 1 ): # if 'number' is divisible by 'divisor' then set 'status' # to false and break out of the loop. if number % divisor == 0: __lowercase = False break # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'status' must be of type bool" return status def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must be an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowercase = list(range(2 , n + 1 ) ) __lowercase = [] # this list will be returned. # actual sieve of Eratosthenes for i in range(len(_SCREAMING_SNAKE_CASE ) ): for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowercase = 0 # filters actual prime numbers. __lowercase = [x for x in begin_list if x != 0] # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must be of type list" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must be an int and > 2" __lowercase = [] # iterates over all numbers from 2 up to N+1 # if a number is prime then appends it to list 'ans' for number in range(2 , n + 1 ): if is_prime(_SCREAMING_SNAKE_CASE ): ans.append(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must be of type list" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must be an int and >= 0" __lowercase = [] # this list will be returned by the function. # potential prime number factors. 
__lowercase = 2 __lowercase = number if number == 0 or number == 1: ans.append(_SCREAMING_SNAKE_CASE ) # if 'number' is not prime then build the prime factorization of 'number' elif not is_prime(_SCREAMING_SNAKE_CASE ): while quotient != 1: if is_prime(_SCREAMING_SNAKE_CASE ) and (quotient % factor == 0): ans.append(_SCREAMING_SNAKE_CASE ) quotient /= factor else: factor += 1 else: ans.append(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must be of type list" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' must be an int and >= 0" __lowercase = 0 # prime factorization of 'number' __lowercase = prime_factorization(_SCREAMING_SNAKE_CASE ) __lowercase = max(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must be of type int" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' must be an int and >= 0" __lowercase = 0 # prime factorization of 'number' __lowercase = prime_factorization(_SCREAMING_SNAKE_CASE ) __lowercase = min(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must be of type int" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must be an int" assert isinstance(number % 2 == 0 , _SCREAMING_SNAKE_CASE ), "the comparison must be of type bool" return number % 2 == 0 def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must be an int" assert isinstance(number % 2 != 0 , _SCREAMING_SNAKE_CASE ), "the comparison must be of type bool" return number % 2 != 0 def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(_SCREAMING_SNAKE_CASE ) ), "'number' must be an int, even and > 2" __lowercase = [] # this list will be returned # creates a list of prime numbers between 2 up to 'number' __lowercase = get_prime_numbers(_SCREAMING_SNAKE_CASE ) __lowercase = len(_SCREAMING_SNAKE_CASE ) # run variable for while-loops. __lowercase = 0 __lowercase = None # exit variable, for breaking out of the loops __lowercase = True while i < len_pn and loop: __lowercase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowercase = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (len(_SCREAMING_SNAKE_CASE ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contain two primes, and their sum must equal 'number'" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must be positive integers." 
__lowercase = 0 while numbera != 0: __lowercase = numbera % numbera __lowercase = numbera __lowercase = rest # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( numbera >= 0 ), "'number' must be of type int and positive" return numbera def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must be positive integers." __lowercase = 1 # actual answer that will be returned. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowercase = prime_factorization(_SCREAMING_SNAKE_CASE ) __lowercase = prime_factorization(_SCREAMING_SNAKE_CASE ) elif numbera == 1 or numbera == 1: __lowercase = [] __lowercase = [] __lowercase = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = 0 __lowercase = 0 __lowercase = [] # captures numbers in both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowercase = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) __lowercase = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) for _ in range(max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ): ans *= n else: __lowercase = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ): ans *= n done.append(_SCREAMING_SNAKE_CASE ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowercase = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ): ans *= n done.append(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( ans >= 0 ), "'ans' must be of type int and positive" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must be a positive int" __lowercase = 0 __lowercase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans is not prime then # run to the next prime number. while not is_prime(_SCREAMING_SNAKE_CASE ): ans += 1 # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and is_prime( _SCREAMING_SNAKE_CASE ), "'ans' must be a prime number and of type int" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert ( is_prime(_SCREAMING_SNAKE_CASE ) and is_prime(_SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a) ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'" __lowercase = p_number_a + 1 # jump to the next number __lowercase = [] # this list will be returned. # if number is not prime then # fetch the next prime number. while not is_prime(_SCREAMING_SNAKE_CASE ): number += 1 while number < p_number_a: ans.append(_SCREAMING_SNAKE_CASE ) number += 1 # fetch the next prime number. while not is_prime(_SCREAMING_SNAKE_CASE ): number += 1 # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ans[0] != p_number_a and ans[len(_SCREAMING_SNAKE_CASE ) - 1] != p_number_a ), "'ans' must be a list without the arguments" # 'ans' contains neither 'pNumber1' nor 'pNumber2'! return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must be an int and >= 1" __lowercase = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_SCREAMING_SNAKE_CASE ) # precondition assert ans[0] == 1 and ans[len(_SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function get_divisors(...)" return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number > 1 ), "'number' must be an int and > 1" __lowercase = get_divisors(_SCREAMING_SNAKE_CASE ) # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (divisors[0] == 1) and (divisors[len(_SCREAMING_SNAKE_CASE ) - 1] == number) ), "Error in help-function get_divisors(...)" # sums all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (denominator != 0) ), "The arguments must be of type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowercase = gcd(abs(_SCREAMING_SNAKE_CASE ) , abs(_SCREAMING_SNAKE_CASE ) ) # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must be an int and >= 0" __lowercase = 1 # this will be returned. for factor in range(1 , n + 1 ): ans *= factor return ans def snake_case_ ( _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must be an int and >= 0" __lowercase = 0 __lowercase = 1 __lowercase = 1 # this will be returned for _ in range(n - 1 ): __lowercase = ans ans += fiba __lowercase = tmp return ans
715
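# Among the helpers above, gcd is computed by repeated remainder and the lcm
# ("kg_v") via prime factorizations. The identity lcm(a, b) * gcd(a, b) == a * b
# gives a much shorter lcm; a sketch:
def gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a

def lcm(a: int, b: int) -> int:
    return a * b // gcd(a, b)

assert gcd(108, 54) == 54
assert lcm(4, 6) == 12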
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _A ( ctypes.Structure ): '''simple docstring''' _snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def snake_case_ ( ): if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) __lowercase = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("\033[?25l" ) sys.stdout.flush() def snake_case_ ( ): if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) __lowercase = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("\033[?25h" ) sys.stdout.flush() @contextmanager def snake_case_ ( ): try: hide_cursor() yield finally: show_cursor()
655
0
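# On POSIX terminals the module above toggles the cursor with the ANSI
# escapes "\033[?25l" (hide) and "\033[?25h" (show), restoring it in a
# finally block. The POSIX-only core of that idea:
import sys
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")  # cursor comes back even on exceptions
        sys.stdout.flush()

with hidden_cursor():
    print("cursor is hidden while this block runs")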
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1_0_2_4 , _SCREAMING_SNAKE_CASE=1_0_2_4 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ): __lowercase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __lowercase = SeqaSeqDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , type_path="train" , **_SCREAMING_SNAKE_CASE ) __lowercase = tok.pad_token_id def get_lens(_SCREAMING_SNAKE_CASE ): __lowercase = tqdm( DataLoader(_SCREAMING_SNAKE_CASE , batch_size=5_1_2 , num_workers=8 , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) __lowercase = [] for batch in dl: __lowercase = batch["input_ids"].ne(_SCREAMING_SNAKE_CASE ).sum(1 ).tolist() __lowercase = batch["labels"].ne(_SCREAMING_SNAKE_CASE ).sum(1 ).tolist() if consider_target: for src, tgt in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): max_lens.append(max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) else: max_lens.extend(_SCREAMING_SNAKE_CASE ) return max_lens __lowercase = get_lens(_SCREAMING_SNAKE_CASE ) __lowercase = SeqaSeqDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , type_path="val" , **_SCREAMING_SNAKE_CASE ) __lowercase = get_lens(_SCREAMING_SNAKE_CASE ) pickle_save(_SCREAMING_SNAKE_CASE , train_ds.len_file ) pickle_save(_SCREAMING_SNAKE_CASE , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
716
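# The length script above counts real (non-padding) tokens per example with
# input_ids.ne(pad_token_id).sum(1). That operation on a toy batch:
import torch

pad_id = 0
batch = torch.tensor([[5, 9, 2, 0, 0], [7, 0, 0, 0, 0]])
lengths = batch.ne(pad_id).sum(1).tolist()
assert lengths == [3, 1]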
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : List[str] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[Any] = """yolos""" def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = qkv_bias __lowercase = num_detection_tokens __lowercase = use_mid_position_embeddings __lowercase = auxiliary_loss # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = eos_coefficient class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = version.parse("""1.11""" ) @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : str ): '''simple docstring''' return 1e-4 @property def _snake_case ( self : Tuple ): '''simple docstring''' return 12
655
0
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : Dict = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _A : '''simple docstring''' _snake_case : List[Any] = PegasusConfig _snake_case : Union[str, Any] = {} _snake_case : List[Any] = """gelu""" def __init__( self : str , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any]=13 , lowerCamelCase : List[Any]=7 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Dict=99 , lowerCamelCase : Dict=32 , lowerCamelCase : Union[str, Any]=5 , lowerCamelCase : Any=4 , lowerCamelCase : int=37 , lowerCamelCase : int=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Dict=20 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : str=1 , lowerCamelCase : Optional[int]=0 , ): '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = eos_token_id __lowercase = pad_token_id __lowercase = bos_token_id def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __lowercase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __lowercase = np.concatenate([input_ids, eos_tensor] , axis=1 ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowercase = prepare_pegasus_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return config, inputs_dict def _snake_case ( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : List[Any] ): '''simple docstring''' __lowercase = 20 __lowercase = model_class_name(lowerCamelCase ) __lowercase = model.encode(inputs_dict["input_ids"] ) __lowercase , __lowercase = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , 
lowerCamelCase ) __lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) __lowercase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowercase = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , ) __lowercase = model.decode(lowerCamelCase , lowerCamelCase ) __lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def _snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ): '''simple docstring''' __lowercase = 20 __lowercase = model_class_name(lowerCamelCase ) __lowercase = model.encode(inputs_dict["input_ids"] ) __lowercase , __lowercase = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowercase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase ) __lowercase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowercase = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __lowercase = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase ) __lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ): if attention_mask is None: __lowercase = np.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __lowercase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : List[Any] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Tuple = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () _snake_case : Tuple = True _snake_case : Any = False _snake_case : Optional[int] = False 
_snake_case : int = False def _snake_case ( self : str ): '''simple docstring''' __lowercase = FlaxPegasusModelTester(self ) __lowercase = ConfigTester(self , config_class=lowerCamelCase ) def _snake_case ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) __lowercase = model_class(lowerCamelCase ) @jax.jit def encode_jitted(lowerCamelCase : int , lowerCamelCase : Optional[Any]=None , **lowerCamelCase : int ): return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase ) with self.subTest("JIT Enabled" ): __lowercase = encode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = encode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( self : str ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = model_class(lowerCamelCase ) __lowercase = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) __lowercase = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any ): return model.decode( decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , ) with self.subTest("JIT Enabled" ): __lowercase = decode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = decode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _snake_case ( self : Tuple ): '''simple docstring''' for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowerCamelCase ) __lowercase = np.ones((1, 1) ) __lowercase = model(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) @slow def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) __lowercase = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) __lowercase = [ " PG&E stated it 
scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] __lowercase = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] __lowercase = tokenizer(lowerCamelCase , return_tensors="np" , truncation=lowerCamelCase , max_length=512 , padding=lowerCamelCase ) __lowercase = model.generate(**lowerCamelCase , num_beams=2 ).sequences __lowercase = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase ) assert tgt_text == decoded
717
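# The Flax tests above run each jitted encode/decode twice, once compiled and
# once under jax.disable_jit(), and compare the outputs. That pattern in
# isolation:
import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return (x * x).sum()

x = jnp.arange(4.0)
jitted = f(x)
with jax.disable_jit():  # same code, executed eagerly op by op
    eager = f(x)
assert jnp.allclose(jitted, eager)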
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Optional[int] = logging.get_logger(__name__) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError("Quantized models are not supported." ) __lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE ) if matches: __lowercase = float(matches[1] ) __lowercase = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __lowercase = 1_0_0_1 __lowercase = "imagenet-1k-id2label.json" __lowercase = "huggingface/label-files" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()} __lowercase = "background" __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} return config def snake_case_ ( ): __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE ) # Load 🤗 model __lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __lowercase = MobileNetVaImageProcessor( crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , ) __lowercase = image_processor(images=prepare_img() , return_tensors="pt" ) __lowercase = model(**_SCREAMING_SNAKE_CASE ) __lowercase = outputs.logits assert logits.shape == (1, 1_0_0_1) if model_name == "mobilenet_v1_1.0_224": __lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": __lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: __lowercase = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("Pushing to the hub..." ) __lowercase = "google/" + model_name image_processor.push_to_hub(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""mobilenet_v1_1.0_224""", type=str, help="""Name of the MobileNetV1 model you'd like to convert. 
Should be in the form 'mobilenet_v1_<depth>_<size>'.""", ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ : Dict = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
655
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : Optional[int] = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[Any] = """unispeech""" def __init__( self : Any , lowerCamelCase : List[str]=32 , lowerCamelCase : Optional[int]=768 , lowerCamelCase : int=12 , lowerCamelCase : int=12 , lowerCamelCase : Dict=3_072 , lowerCamelCase : Any="gelu" , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : int=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Any=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Any=0.02 , lowerCamelCase : Dict=1e-5 , lowerCamelCase : Optional[Any]="group" , lowerCamelCase : str="gelu" , lowerCamelCase : Optional[int]=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase : Dict=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase : Any=False , lowerCamelCase : Dict=128 , lowerCamelCase : Optional[Any]=16 , lowerCamelCase : Any=False , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]=0.05 , lowerCamelCase : Tuple=10 , lowerCamelCase : int=2 , lowerCamelCase : Tuple=0.0 , lowerCamelCase : List[str]=10 , lowerCamelCase : Union[str, Any]=0 , lowerCamelCase : Dict=320 , lowerCamelCase : Tuple=2 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : str=100 , lowerCamelCase : List[str]=256 , lowerCamelCase : List[Any]=256 , lowerCamelCase : str=0.1 , lowerCamelCase : Union[str, Any]="mean" , lowerCamelCase : List[Any]=False , lowerCamelCase : List[str]=False , lowerCamelCase : int=256 , lowerCamelCase : Any=80 , lowerCamelCase : Optional[Any]=0 , lowerCamelCase : int=1 , lowerCamelCase : Any=2 , lowerCamelCase : Optional[Any]=0.5 , **lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase ) __lowercase = hidden_size __lowercase = feat_extract_norm __lowercase = feat_extract_activation __lowercase = list(lowerCamelCase ) __lowercase = list(lowerCamelCase ) __lowercase = list(lowerCamelCase ) __lowercase = conv_bias __lowercase = num_conv_pos_embeddings __lowercase = num_conv_pos_embedding_groups __lowercase = len(self.conv_dim ) __lowercase = num_hidden_layers __lowercase = intermediate_size __lowercase = hidden_act __lowercase = num_attention_heads __lowercase = hidden_dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = feat_proj_dropout __lowercase = final_dropout __lowercase = layerdrop __lowercase = layer_norm_eps __lowercase = initializer_range __lowercase = num_ctc_classes __lowercase = vocab_size __lowercase = do_stable_layer_norm __lowercase = use_weighted_layer_sum __lowercase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowercase = apply_spec_augment __lowercase = mask_time_prob __lowercase = mask_time_length __lowercase = mask_time_min_masks __lowercase = mask_feature_prob __lowercase = mask_feature_length __lowercase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __lowercase = num_codevectors_per_group __lowercase = num_codevector_groups __lowercase = contrastive_logits_temperature __lowercase = feat_quantizer_dropout __lowercase = num_negatives __lowercase = codevector_dim __lowercase = proj_codevector_dim __lowercase = diversity_loss_weight # ctc loss __lowercase = ctc_loss_reduction __lowercase = ctc_zero_infinity # pretraining loss __lowercase = replace_prob @property def _snake_case ( self : List[str] ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
718
from __future__ import annotations from typing import Any class _A : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def _snake_case ( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCamelCase ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(lowerCamelCase ) component_size[u_node] += component_size[v_node] self.set_component(lowerCamelCase ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase ) print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(f"""The total weight of the minimum spanning tree is: {mst_weight}""" ) def snake_case_ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
655
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class _A ( metaclass=_lowercase ): '''simple docstring''' _snake_case : Any = ["""onnx"""] def __init__( self : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["onnx"] ) @classmethod def _snake_case ( cls : Optional[int] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : str ): '''simple docstring''' requires_backends(cls , ["onnx"] ) @classmethod def _snake_case ( cls : int , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["onnx"] )
719
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : List[str] = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
655
0
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput snake_case__ : str = """scheduler_config.json""" class _A ( _lowercase ): '''simple docstring''' _snake_case : Union[str, Any] = 1 _snake_case : int = 2 _snake_case : Optional[int] = 3 _snake_case : Optional[int] = 4 _snake_case : int = 5 @dataclass class _A ( _lowercase ): '''simple docstring''' _snake_case : jnp.ndarray class _A : '''simple docstring''' _snake_case : Optional[int] = SCHEDULER_CONFIG_NAME _snake_case : Dict = ["""dtype"""] _snake_case : Dict = [] _snake_case : Union[str, Any] = True @classmethod def _snake_case ( cls : Dict , lowerCamelCase : Dict[str, Any] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[Any]=False , **lowerCamelCase : Union[str, Any] , ): '''simple docstring''' __lowercase , __lowercase = cls.load_config( pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , ) __lowercase , __lowercase = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase ) if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ): __lowercase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def _snake_case ( self : List[str] , lowerCamelCase : Union[str, os.PathLike] , lowerCamelCase : bool = False , **lowerCamelCase : List[str] ): '''simple docstring''' self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase ) @property def _snake_case ( self : int ): '''simple docstring''' return self._get_compatibles() @classmethod def _snake_case ( cls : Union[str, Any] ): '''simple docstring''' __lowercase = list(set([cls.__name__] + cls._compatibles ) ) __lowercase = importlib.import_module(__name__.split("." 
)[0] ) __lowercase = [ getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase ) ] return compatible_classes def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert len(_SCREAMING_SNAKE_CASE ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_SCREAMING_SNAKE_CASE ) - x.ndim) ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9_9_9 , _SCREAMING_SNAKE_CASE=jnp.floataa ): def alpha_bar(_SCREAMING_SNAKE_CASE ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 __lowercase = [] for i in range(_SCREAMING_SNAKE_CASE ): __lowercase = i / num_diffusion_timesteps __lowercase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(_SCREAMING_SNAKE_CASE ) / alpha_bar(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return jnp.array(_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) @flax.struct.dataclass class _A : '''simple docstring''' _snake_case : jnp.ndarray _snake_case : jnp.ndarray _snake_case : jnp.ndarray @classmethod def _snake_case ( cls : str , lowerCamelCase : Any ): '''simple docstring''' __lowercase = scheduler.config if config.trained_betas is not None: __lowercase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": __lowercase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowercase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowercase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" ) __lowercase = 1.0 - betas __lowercase = jnp.cumprod(lowerCamelCase , axis=0 ) return cls( alphas=lowerCamelCase , betas=lowerCamelCase , alphas_cumprod=lowerCamelCase , ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = state.alphas_cumprod __lowercase = alphas_cumprod[timesteps] ** 0.5 __lowercase = sqrt_alpha_prod.flatten() __lowercase = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape ) __lowercase = (1 - alphas_cumprod[timesteps]) ** 0.5 __lowercase = sqrt_one_minus_alpha_prod.flatten() __lowercase = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
720
from __future__ import annotations import bisect def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = 0 __lowercase = len(_SCREAMING_SNAKE_CASE ) - 1 while left <= right: __lowercase = left + (right - left) // 2 __lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __lowercase = midpoint - 1 else: __lowercase = midpoint + 1 return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item: return index return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if right < left: return None __lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 ) else: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip() snake_case__ : Any = sorted(int(item) for item in user_input.split(""",""")) snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n""")) snake_case__ : List[Any] = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
655
0
from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
721
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ : int = logging.get_logger(__name__) snake_case__ : Optional[int] = { """microsoft/conditional-detr-resnet-50""": ( """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json""" ), } class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = """conditional_detr""" _snake_case : Union[str, Any] = ["""past_key_values"""] _snake_case : Optional[int] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = backbone_config.get("model_type" ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(lowerCamelCase ) __lowercase = use_timm_backbone __lowercase = backbone_config __lowercase = num_channels __lowercase = num_queries __lowercase = d_model __lowercase = encoder_ffn_dim __lowercase = encoder_layers __lowercase = encoder_attention_heads __lowercase = decoder_ffn_dim __lowercase = decoder_layers __lowercase = decoder_attention_heads __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = activation_function __lowercase = init_std __lowercase = init_xavier_std __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = encoder_layers __lowercase = auxiliary_loss __lowercase = position_embedding_type __lowercase = backbone __lowercase = use_pretrained_backbone __lowercase = dilation # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = mask_loss_coefficient __lowercase = dice_loss_coefficient __lowercase = cls_loss_coefficient __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return self.encoder_attention_heads @property def _snake_case ( self : str ): '''simple docstring''' return self.d_model def _snake_case ( self : int ): '''simple docstring''' __lowercase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output class _A ( _lowercase ): '''simple docstring''' _snake_case : Any = version.parse("""1.11""" ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _snake_case ( self : Any ): '''simple docstring''' return 1e-5 @property def _snake_case ( self : Optional[Any] ): '''simple docstring''' return 12
655
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : Dict = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[str] = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys snake_case__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
700
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ : Any = logging.get_logger(__name__) class _A ( _lowercase , _lowercase ): '''simple docstring''' _snake_case : Dict = """maskformer-swin""" _snake_case : List[str] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = embed_dim __lowercase = depths __lowercase = len(lowerCamelCase ) __lowercase = num_heads __lowercase = window_size __lowercase = mlp_ratio __lowercase = qkv_bias __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = drop_path_rate __lowercase = hidden_act __lowercase = use_absolute_embeddings __lowercase = layer_norm_eps __lowercase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) ) __lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
655
0
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = analyze_text(_SCREAMING_SNAKE_CASE ) __lowercase = list(" " + ascii_lowercase ) # what is our total sum of probabilities. __lowercase = sum(single_char_strings.values() ) # one length string __lowercase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: __lowercase = single_char_strings[ch] __lowercase = my_str / all_sum my_fir_sum += prob * math.loga(_SCREAMING_SNAKE_CASE ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string __lowercase = sum(two_char_strings.values() ) __lowercase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: __lowercase = cha + cha if sequence in two_char_strings: __lowercase = two_char_strings[sequence] __lowercase = int(_SCREAMING_SNAKE_CASE ) / all_sum my_sec_sum += prob * math.loga(_SCREAMING_SNAKE_CASE ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = Counter() # type: ignore __lowercase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def snake_case_ ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
701
def snake_case_ ( _SCREAMING_SNAKE_CASE ): # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError("The given input must be non-negative" ) # get the generated string sequence __lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE ) # convert them to integers for i in range(len(_SCREAMING_SNAKE_CASE ) ): __lowercase = int(sequence[i] , 2 ) return sequence def snake_case_ ( _SCREAMING_SNAKE_CASE ): # The approach is a recursive one # Base case achieved when either n = 0 or n = 1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __lowercase = 1 << bit_count # defines the length of the sequence # 1 << n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __lowercase = gray_code_sequence_string(bit_count - 1 ) __lowercase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __lowercase = "0" + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __lowercase = "1" + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
655
0
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : Any = ConsistencyModelPipeline _snake_case : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _snake_case : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _snake_case : str = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = UNetaDModel.from_pretrained( "diffusers/consistency-models-test" , subfolder="test_unet" , ) return unet @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = UNetaDModel.from_pretrained( "diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , ) return unet def _snake_case ( self : Optional[Any] , lowerCamelCase : List[str]=False ): '''simple docstring''' if class_cond: __lowercase = self.dummy_cond_unet else: __lowercase = self.dummy_uncond_unet # Default to CM multistep sampler __lowercase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowercase = { "unet": unet, "scheduler": scheduler, } return components def _snake_case ( self : Tuple , lowerCamelCase : Dict , lowerCamelCase : Optional[int]=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("mps" ): __lowercase = torch.manual_seed(lowerCamelCase ) else: __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __lowercase = { "batch_size": 1, "num_inference_steps": None, "timesteps": [22, 0], "generator": generator, "output_type": "np", } return inputs def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = ConsistencyModelPipeline(**lowerCamelCase ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 32, 32, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components(class_cond=lowerCamelCase ) __lowercase = ConsistencyModelPipeline(**lowerCamelCase ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = 0 __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 32, 32, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = 
np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = ConsistencyModelPipeline(**lowerCamelCase ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = 1 __lowercase = None __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 32, 32, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components(class_cond=lowerCamelCase ) __lowercase = ConsistencyModelPipeline(**lowerCamelCase ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = 1 __lowercase = None __lowercase = 0 __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 32, 32, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : str , lowerCamelCase : Tuple=0 , lowerCamelCase : List[str]=False , lowerCamelCase : Tuple="cpu" , lowerCamelCase : Optional[Any]=torch.floataa , lowerCamelCase : Optional[int]=(1, 3, 64, 64) ): '''simple docstring''' __lowercase = torch.manual_seed(lowerCamelCase ) __lowercase = { "num_inference_steps": None, "timesteps": [22, 0], "class_labels": 0, "generator": generator, "output_type": "np", } if get_fixed_latents: __lowercase = self.get_fixed_latents(seed=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase , shape=lowerCamelCase ) __lowercase = latents return inputs def _snake_case ( self : Tuple , lowerCamelCase : Tuple=0 , lowerCamelCase : Dict="cpu" , lowerCamelCase : Optional[int]=torch.floataa , lowerCamelCase : Dict=(1, 3, 64, 64) ): '''simple docstring''' if type(lowerCamelCase ) == str: __lowercase = torch.device(lowerCamelCase ) __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __lowercase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase ) return latents def _snake_case ( self : int ): '''simple docstring''' __lowercase = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" ) __lowercase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowercase = ConsistencyModelPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase ) pipe.to(torch_device=lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_inputs() __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 64, 64, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = np.array([0.0888, 0.0881, 
0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _snake_case ( self : int ): '''simple docstring''' __lowercase = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" ) __lowercase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowercase = ConsistencyModelPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase ) pipe.to(torch_device=lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_inputs() __lowercase = 1 __lowercase = None __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 64, 64, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" ) __lowercase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowercase = ConsistencyModelPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase ) pipe.to(torch_device=lowerCamelCase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_inputs(get_fixed_latents=lowerCamelCase , device=lowerCamelCase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=lowerCamelCase , enable_math=lowerCamelCase , enable_mem_efficient=lowerCamelCase ): __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 64, 64, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" ) __lowercase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowercase = ConsistencyModelPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase ) pipe.to(torch_device=lowerCamelCase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_inputs(get_fixed_latents=lowerCamelCase , device=lowerCamelCase ) __lowercase = 1 __lowercase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=lowerCamelCase , enable_math=lowerCamelCase , enable_mem_efficient=lowerCamelCase ): __lowercase = pipe(**lowerCamelCase ).images assert image.shape == (1, 64, 64, 3) __lowercase = image[0, -3:, -3:, -1] __lowercase = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
702
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ): model.train() __lowercase = model(_SCREAMING_SNAKE_CASE ) __lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): set_seed(4_2 ) __lowercase = RegressionModel() __lowercase = deepcopy(_SCREAMING_SNAKE_CASE ) __lowercase = RegressionDataset(length=8_0 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) model.to(accelerator.device ) if sched: __lowercase = AdamW(params=model.parameters() , lr=1E-3 ) __lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 ) __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 ) __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 ) # Make a copy of `model` if sched: __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def snake_case_ ( _SCREAMING_SNAKE_CASE ): # Test when on a single CPU or GPU that the context manager does nothing __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) # Use a single batch __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] def snake_case_ ( _SCREAMING_SNAKE_CASE ): # Test on distributed setup that context manager behaves properly __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) # Use a single batch __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ): __lowercase = Accelerator( split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = batch.values() # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_SCREAMING_SNAKE_CASE 
): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] GradientState._reset_state() def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ): __lowercase = Accelerator( split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = batch.values() # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n""" __lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )) if accelerator.num_processes > 1: check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def snake_case_ ( ): __lowercase = Accelerator() __lowercase = RegressionDataset(length=8_0 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) __lowercase = RegressionDataset(length=9_6 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE ) if iteration < len(_SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE ) if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def snake_case_ ( ): __lowercase = Accelerator() __lowercase = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(_SCREAMING_SNAKE_CASE ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(_SCREAMING_SNAKE_CASE ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
655
0
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
    __lowercase = None
    if token is not None:
        __lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    __lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    __lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
    __lowercase = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        __lowercase = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )

        for i in range(_SCREAMING_SNAKE_CASE ):
            __lowercase = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )

        return job_links
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )

    return {}


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
    __lowercase = None
    if token is not None:
        __lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    __lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    __lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
    __lowercase = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        __lowercase = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )

        for i in range(_SCREAMING_SNAKE_CASE ):
            __lowercase = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )

        return artifacts
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )

    return {}


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = None
    if token is not None:
        __lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}

    __lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
    __lowercase = result.headers["Location"]
    __lowercase = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
    __lowercase = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
    with open(_SCREAMING_SNAKE_CASE , "wb" ) as fp:
        fp.write(response.content )


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
    __lowercase = []
    __lowercase = []
    __lowercase = None
    with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
        for filename in z.namelist():
            if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(_SCREAMING_SNAKE_CASE ) as f:
                        for line in f:
                            __lowercase = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    __lowercase = line[: line.index(": " )]
                                    __lowercase = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                __lowercase = line[len("FAILED " ) :]
                                failed_tests.append(_SCREAMING_SNAKE_CASE )
                            elif filename == "job_name.txt":
                                __lowercase = line

    if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
        raise ValueError(
            F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` """
            F"""and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            " problem." )

    __lowercase = None
    if job_name and job_links:
        __lowercase = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    # A list with elements of the form (line of error, error, failed test)
    __lowercase = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]

    return result


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
    __lowercase = []
    __lowercase = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )

    return errors


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
    __lowercase = Counter()
    counter.update([x[1] for x in logs] )
    __lowercase = counter.most_common()
    __lowercase = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            __lowercase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    __lowercase = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
    return r


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        __lowercase = test.split("/" )[2]
    else:
        __lowercase = None

    return test


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
    __lowercase = [(x[0], x[1], get_model(x[2] )) for x in logs]
    __lowercase = [x for x in logs if x[2] is not None]
    __lowercase = {x[2] for x in logs}

    __lowercase = {}
    for test in tests:
        __lowercase = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        __lowercase = counter.most_common()
        __lowercase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        __lowercase = sum(error_counts.values() )
        if n_errors > 0:
            __lowercase = {"count": n_errors, "errors": error_counts}

    __lowercase = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
    return r


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = "| no. | error | status |"
    __lowercase = "|-:|:-|:-|"
    __lowercase = [header, sep]
    for error in reduced_by_error:
        __lowercase = reduced_by_error[error]["count"]
        __lowercase = F"""| {count} | {error[:1_0_0]} | |"""
        lines.append(_SCREAMING_SNAKE_CASE )

    return "\n".join(_SCREAMING_SNAKE_CASE )


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = "| model | no. of errors | major error | count |"
    __lowercase = "|-:|-:|-:|-:|"
    __lowercase = [header, sep]
    for model in reduced_by_model:
        __lowercase = reduced_by_model[model]["count"]
        __lowercase , __lowercase = list(reduced_by_model[model]["errors"].items() )[0]
        __lowercase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
        lines.append(_SCREAMING_SNAKE_CASE )

    return "\n".join(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    snake_case__ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    snake_case__ : Union[str, Any] = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    snake_case__ : List[Any] = get_job_links(args.workflow_run_id, token=args.token)
    snake_case__ : Optional[int] = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                snake_case__ : Dict = k.find(""" / """)
                snake_case__ : Optional[int] = k[index + len(""" / """) :]
            snake_case__ : Optional[Any] = v
    with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    snake_case__ : str = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    snake_case__ : int = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    snake_case__ : Optional[int] = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    snake_case__ : Tuple = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    snake_case__ : Union[str, Any] = reduce_by_error(errors)
    snake_case__ : Optional[int] = reduce_by_model(errors)

    snake_case__ : Optional[Any] = make_github_table(reduced_by_error)
    snake_case__ : Union[str, Any] = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
        fp.write(sa)
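# A minimal offline sketch of the error-grouping step implemented above (the
# sample obfuscates its name; in the original script it is `reduce_by_error`).
# The log rows below are hypothetical, shaped as [error_line, error, test, job_link].
from collections import Counter


def reduce_by_error(logs, error_filter=None):
    counter = Counter(x[1] for x in logs)
    r = {}
    for error, count in counter.most_common():
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    return dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))


logs = [
    ["test_foo.py:10", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_foo", None],
    ["test_bar.py:22", "AssertionError", "tests/models/gpt2/test_modeling_gpt2.py::test_bar", None],
    ["test_baz.py:5", "ImportError", "tests/models/t5/test_modeling_t5.py::test_baz", None],
]
print(reduce_by_error(logs)["AssertionError"]["count"])  # 2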
703
from ....utils import logging


snake_case__ : List[Any] = logging.get_logger(__name__)


class _A ( _lowercase ):
    '''simple docstring'''

    def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
        '''simple docstring'''
        __lowercase = config.__dict__
        __lowercase = modal_hidden_size
        if num_labels:
            __lowercase = num_labels
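# A small behavioral sketch of the wrapper above (in the original source it is
# a modal config that copies another config's attributes); `SimpleNamespace`
# stands in for a real transformers config object, and the attribute names
# below are illustrative only.
from types import SimpleNamespace

base = SimpleNamespace(hidden_size=768, vocab_size=30_522)
wrapped = SimpleNamespace(**base.__dict__)  # copies attributes; the original assigns the dict directly
wrapped.modal_hidden_size = 2_048           # default modal hidden size
wrapped.num_labels = 2                      # only set when a label count is given
print(wrapped.hidden_size, wrapped.modal_hidden_size, wrapped.num_labels)  # 768 2048 2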
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
    __lowercase = set()
    # edges = list of graph's edges
    __lowercase = get_edges(_SCREAMING_SNAKE_CASE )

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        __lowercase , __lowercase = edges.pop()
        chosen_vertices.add(_SCREAMING_SNAKE_CASE )
        chosen_vertices.add(_SCREAMING_SNAKE_CASE )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(_SCREAMING_SNAKE_CASE )
    return chosen_vertices


def snake_case_ ( _SCREAMING_SNAKE_CASE ) -> Any:
    __lowercase = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
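# A runnable version of the commented-out demo above, written with the
# de-mangled names of the original algorithm (`matching_min_vertex_cover`
# and `get_edges`); the sample obfuscates them.
def get_edges(graph):
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


def matching_min_vertex_cover(graph):
    chosen_vertices = set()
    edges = get_edges(graph)
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")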
704
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : Dict = StableUnCLIPImgaImgPipeline _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : int = frozenset([] ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = 32 __lowercase = embedder_hidden_size # image encoding components __lowercase = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __lowercase = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowercase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __lowercase = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __lowercase = AutoencoderKL() __lowercase = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, 
"text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' if str(lowerCamelCase ).startswith("mps" ): __lowercase = torch.manual_seed(lowerCamelCase ) else: __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __lowercase = input_image * 0.5 + 0.5 __lowercase = input_image.clamp(0 , 1 ) __lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __lowercase = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __lowercase = sd_pipe(**lowerCamelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _snake_case ( self : str ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Any ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , 
lowerCamelCase ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __lowercase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
655
0
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class _A ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : int=13 , lowerCamelCase : str=7 , lowerCamelCase : Optional[Any]=True , lowerCamelCase : List[Any]=True , lowerCamelCase : str=True , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Tuple=99 , lowerCamelCase : Tuple=32 , lowerCamelCase : Optional[int]=5 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : Dict=37 , lowerCamelCase : List[str]="gelu" , lowerCamelCase : str=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Optional[Any]=512 , lowerCamelCase : Optional[int]=16 , lowerCamelCase : List[str]=2 , lowerCamelCase : int=0.02 , lowerCamelCase : Optional[int]=4 , ): '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_attention_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_choices def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_attention_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : List[str] = True _snake_case : str = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if 
is_flax_available() else () ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = FlaxRoFormerModelTester(self ) @slow def _snake_case ( self : str ): '''simple docstring''' for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=lowerCamelCase ) __lowercase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase ) @require_flax class _A ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) __lowercase = jnp.array([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(lowerCamelCase )[0] __lowercase = 50_000 __lowercase = (1, 6, vocab_size) self.assertEqual(output.shape , lowerCamelCase ) __lowercase = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCamelCase , atol=1e-4 ) )
705
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _A ( _lowercase , _lowercase ): '''simple docstring''' @register_to_config def __init__( self : Optional[Any] , *, lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__() __lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) ) # parameters for additional clip time embeddings __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase ) __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase ) # parameters for encoder hidden states __lowercase = clip_extra_context_tokens __lowercase = nn.Linear( lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim ) __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase ) __lowercase = nn.LayerNorm(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ): '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings __lowercase = image_embeddings.shape[0] __lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) __lowercase = classifier_free_guidance_embeddings.expand( lowerCamelCase , -1 ) __lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] __lowercase = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... __lowercase = self.embedding_proj(lowerCamelCase ) __lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase ) __lowercase = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" __lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase ) __lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens ) __lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 ) __lowercase = self.encoder_hidden_states_proj(lowerCamelCase ) __lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase ) __lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
655
0
import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    snake_case__ : Union[str, Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    snake_case__ : List[str] = [0, 25, 50]
    snake_case__ : Optional[int] = [25, 50, 75]
    snake_case__ : int = fuzz.membership.trimf(X, abca)
    snake_case__ : Dict = fuzz.membership.trimf(X, abca)

    # Compute the different operations using inbuilt functions.
    snake_case__ : List[str] = np.ones(75)
    snake_case__ : Any = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    snake_case__ : Tuple = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    snake_case__ : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    snake_case__ : Any = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    snake_case__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    snake_case__ : int = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    snake_case__ : List[Any] = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    snake_case__ : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    snake_case__ : Any = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("""Young""")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("""Middle aged""")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("""union""")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("""intersection""")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("""complement_a""")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("""difference a/b""")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("""alg_sum""")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("""alg_product""")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("""bdd_sum""")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("""bdd_difference""")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
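# A tiny check of the two basic fuzzy-set operations used above on
# hand-computable values, assuming scikit-fuzzy is installed; with identical
# universes, fuzz.fuzzy_or / fuzz.fuzzy_and return the element-wise max / min
# of the membership grades (second element of the returned tuple).
import numpy as np
import skfuzzy as fuzz

x = np.array([0.0, 1.0, 2.0])
mf_a = np.array([0.2, 0.5, 0.9])
mf_b = np.array([0.6, 0.3, 0.4])
print(fuzz.fuzzy_or(x, mf_a, x, mf_b)[1])   # [0.6 0.5 0.9]
print(fuzz.fuzzy_and(x, mf_a, x, mf_b)[1])  # [0.2 0.3 0.4]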
706
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")


class _A ( Generic[T, U] ):
    '''simple docstring'''

    def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
        '''simple docstring'''
        __lowercase = key
        __lowercase = val
        __lowercase = None
        __lowercase = None

    def __repr__( self : Any ):
        '''simple docstring'''
        return (
            f"""Node: key: {self.key}, val: {self.val}, """
            f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
        )


class _A ( Generic[T, U] ):
    '''simple docstring'''

    def __init__( self : Dict ):
        '''simple docstring'''
        __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
        __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
        __lowercase , __lowercase = self.rear, self.head

    def __repr__( self : Optional[Any] ):
        '''simple docstring'''
        __lowercase = ["DoubleLinkedList"]
        __lowercase = self.head
        while node.next is not None:
            rep.append(str(lowerCamelCase ) )
            __lowercase = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(lowerCamelCase )

    def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
        '''simple docstring'''
        __lowercase = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        __lowercase = node
        __lowercase = previous
        __lowercase = node
        __lowercase = self.rear

    def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
        '''simple docstring'''
        if node.prev is None or node.next is None:
            return None

        __lowercase = node.next
        __lowercase = node.prev
        __lowercase = None
        __lowercase = None
        return node


class _A ( Generic[T, U] ):
    '''simple docstring'''

    _snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__( self : List[Any] , lowerCamelCase : int ):
        '''simple docstring'''
        __lowercase = DoubleLinkedList()
        __lowercase = capacity
        __lowercase = 0
        __lowercase = 0
        __lowercase = 0
        __lowercase = {}

    def __repr__( self : Optional[Any] ):
        '''simple docstring'''
        return (
            f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
            f"""capacity={self.capacity}, current size={self.num_keys})"""
        )

    def __contains__( self : Dict , lowerCamelCase : T ):
        '''simple docstring'''
        return key in self.cache

    def _snake_case ( self : List[Any] , lowerCamelCase : T ):
        '''simple docstring'''
        if key in self.cache:
            self.hits += 1
            __lowercase = self.cache[key]
            __lowercase = self.list.remove(self.cache[key] )
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(lowerCamelCase )
            return node.val
        self.miss += 1
        return None

    def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
        '''simple docstring'''
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                __lowercase = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(lowerCamelCase ) is not None
                )  # node guaranteed to be in list
                assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            __lowercase = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            __lowercase = value
            self.list.add(lowerCamelCase )

    @classmethod
    def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
        '''simple docstring'''

        def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    __lowercase = LRUCache(lowerCamelCase )

                __lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    __lowercase = func(*lowerCamelCase )
                    cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(lowerCamelCase , "cache_info" , lowerCamelCase )  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
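# A usage sketch for the cache above, assuming the de-mangled names of the
# original algorithm (`LRUCache` with its `decorator` classmethod); the
# decorator memoizes a single-argument function behind an LRU bound.
@LRUCache.decorator(100)
def fib(num):
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(30))           # 832040
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=30)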
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        __lowercase = len(set_a.intersection(_SCREAMING_SNAKE_CASE ) )

        if alternative_union:
            __lowercase = len(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE )
        else:
            __lowercase = len(set_a.union(_SCREAMING_SNAKE_CASE ) )

        return intersection / union

    if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
        __lowercase = [element for element in set_a if element in set_b]

        if alternative_union:
            __lowercase = len(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE )
            return len(_SCREAMING_SNAKE_CASE ) / union
        else:
            __lowercase = set_a + [element for element in set_b if element not in set_a]
            return len(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )

        return len(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )
    return None


if __name__ == "__main__":
    snake_case__ : Dict = {"""a""", """b""", """c""", """d""", """e"""}
    snake_case__ : Any = {"""c""", """d""", """e""", """f""", """h""", """i"""}
    print(jaccard_similarity(set_a, set_b))
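# A worked check of the Jaccard score computed by the function above (original
# name `jaccard_similarity`): |A ∩ B| = 3 and |A ∪ B| = 8, so the result is 0.375.
A = {"a", "b", "c", "d", "e"}
B = {"c", "d", "e", "f", "h", "i"}
print(len(A & B) / len(A | B))  # 0.375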
707
import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) snake_case__ : Optional[Any] = logging.getLogger() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = "\n".join(_SCREAMING_SNAKE_CASE ) Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random""" snake_case__ : int = """sshleifer/bart-tiny-random""" snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart""" snake_case__ : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _A ( _lowercase ): '''simple docstring''' def _snake_case ( self : str , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(lowerCamelCase , lowerCamelCase ) __lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split() with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): run_generate() assert Path(lowerCamelCase ).exists() # os.remove(Path(output_file_name)) def _snake_case ( self : Dict ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } __lowercase = Path(self.get_auto_remove_tmp_dir() ) __lowercase = str(tmp_dir / "scores.json" ) __lowercase = str(tmp_dir / "val.target" ) _dump_articles(lowerCamelCase , text["en"] ) _dump_articles(lowerCamelCase , text["de"] ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {str(lowerCamelCase )} {str(lowerCamelCase )} --score_path {score_path} --reference_path {reference_path} --task {task} """.split() testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] ) with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): with CaptureStdout() as cs: run_search() __lowercase = [" num_beams | length_penalty", model, "Best score args"] __lowercase = ["Info"] if "translation" in task: expected_strings.append("bleu" ) else: 
expected_strings.extend(lowerCamelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowerCamelCase ).exists() os.remove(Path(lowerCamelCase ) )
655
0
import argparse
import json
import subprocess


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = []
    __lowercase = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    __lowercase = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
    __lowercase = output.stdout.decode("utf-8" )
    __lowercase = json.loads(_SCREAMING_SNAKE_CASE )

    __lowercase = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(_SCREAMING_SNAKE_CASE )

    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )

    if len(_SCREAMING_SNAKE_CASE ) > 0:
        __lowercase = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )


if __name__ == "__main__":

    def snake_case_ ( _SCREAMING_SNAKE_CASE ):
        return values.split("," )

    snake_case__ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )
    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    snake_case__ : Union[str, Any] = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
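# A minimal offline sketch of the filtering logic above (the real script
# shells out to `curl`); the runner payload below is hypothetical but follows
# the shape the code reads: `status["runners"]` entries with `name` and
# `status` keys.
status = {
    "runners": [
        {"name": "gpu-runner-1", "status": "online"},
        {"name": "gpu-runner-2", "status": "offline"},
    ]
}
target_runners = ["gpu-runner-1", "gpu-runner-2"]
offline = [r for r in status["runners"] if r["name"] in target_runners and r["status"] == "offline"]
print([r["name"] for r in offline])  # ['gpu-runner-2']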
708
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class _A :
    '''simple docstring'''

    _snake_case : int
    _snake_case : TreeNode | None = None
    _snake_case : TreeNode | None = None


snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    if root is None:
        return 0

    # Validation
    def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        __lowercase , __lowercase = get_distrib(node.left )
        __lowercase , __lowercase = get_distrib(node.right )

        __lowercase = 1 - left_distrib_excess
        __lowercase = 1 - right_distrib_excess

        __lowercase = (
            left_distrib_moves
            + right_distrib_moves
            + abs(_SCREAMING_SNAKE_CASE )
            + abs(_SCREAMING_SNAKE_CASE )
        )
        __lowercase = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    return get_distrib(_SCREAMING_SNAKE_CASE )[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
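# A usage sketch, assuming the de-mangled names of the original algorithm
# (`TreeNode(data, left, right)` and `distribute_coins`): a root holding 3
# coins with two empty children needs exactly 2 moves (one coin to each child).
root = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(root))  # 2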
655
0
from __future__ import annotations

snake_case__ : Tuple = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def snake_case_ (
    _SCREAMING_SNAKE_CASE ,
    _SCREAMING_SNAKE_CASE ,
    _SCREAMING_SNAKE_CASE ,
    _SCREAMING_SNAKE_CASE ,
    _SCREAMING_SNAKE_CASE ,
):
    __lowercase = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
    ]  # the reference grid
    __lowercase = 1
    __lowercase = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
    ]  # the action grid

    __lowercase = init[0]
    __lowercase = init[1]
    __lowercase = 0
    __lowercase = g + heuristic[x][y]  # cost from starting cell to destination cell
    __lowercase = [[f, g, x, y]]

    __lowercase = False  # flag that is set when search is complete
    __lowercase = False  # flag set if we can't find expand

    while not found and not resign:
        if len(_SCREAMING_SNAKE_CASE ) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            __lowercase = cell.pop()
            __lowercase = next_cell[2]
            __lowercase = next_cell[3]
            __lowercase = next_cell[1]

            if x == goal[0] and y == goal[1]:
                __lowercase = True
            else:
                for i in range(len(_SCREAMING_SNAKE_CASE ) ):  # to try out different valid actions
                    __lowercase = x + DIRECTIONS[i][0]
                    __lowercase = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            __lowercase = g + cost
                            __lowercase = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            __lowercase = 1
                            __lowercase = i
    __lowercase = []
    __lowercase = goal[0]
    __lowercase = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        __lowercase = x - DIRECTIONS[action[x][y]][0]
        __lowercase = y - DIRECTIONS[action[x][y]][1]
        __lowercase = xa
        __lowercase = ya
        invpath.append([x, y] )

    __lowercase = []
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] )
    return path, action


if __name__ == "__main__":
    snake_case__ : List[str] = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    snake_case__ : Optional[int] = [0, 0]
    # all coordinates are given in format [y,x]
    snake_case__ : Union[str, Any] = [len(grid) - 1, len(grid[0]) - 1]
    snake_case__ : Dict = 1

    # the cost map which pushes the path closer to the goal
    snake_case__ : Tuple = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            snake_case__ : Optional[int] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                snake_case__ : List[Any] = 99

    snake_case__ : Union[str, Any] = search(grid, init, goal, cost, heuristic)

    print("""ACTION MAP""")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
709
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = SwinvaConfig() __lowercase = swinva_name.split("_" ) __lowercase = name_split[1] if "to" in name_split[3]: __lowercase = int(name_split[3][-3:] ) else: __lowercase = int(name_split[3] ) if "to" in name_split[2]: __lowercase = int(name_split[2][-2:] ) else: __lowercase = int(name_split[2][6:] ) if model_size == "tiny": __lowercase = 9_6 __lowercase = (2, 2, 6, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "small": __lowercase = 9_6 __lowercase = (2, 2, 1_8, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "base": __lowercase = 1_2_8 __lowercase = (2, 2, 1_8, 2) __lowercase = (4, 8, 1_6, 3_2) else: __lowercase = 1_9_2 __lowercase = (2, 2, 1_8, 2) __lowercase = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: __lowercase = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __lowercase = 2_1_8_4_1 __lowercase = "huggingface/label-files" __lowercase = "imagenet-22k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} else: __lowercase = 1_0_0_0 __lowercase = "huggingface/label-files" __lowercase = "imagenet-1k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} __lowercase = img_size __lowercase = num_classes __lowercase = embed_dim __lowercase = depths __lowercase = num_heads __lowercase = window_size return config def snake_case_ ( _SCREAMING_SNAKE_CASE ): if "patch_embed.proj" in name: __lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowercase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowercase = "encoder." + name if "attn.proj" in name: __lowercase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __lowercase = name.replace("attn" , "attention.self" ) if "norm1" in name: __lowercase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __lowercase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __lowercase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __lowercase = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: __lowercase = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: __lowercase = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: __lowercase = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: __lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": __lowercase = "layernorm.weight" if name == "norm.bias": __lowercase = "layernorm.bias" if "head" in name: __lowercase = name.replace("head" , "classifier" ) else: __lowercase = "swinv2." 
+ name return name def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for key in orig_state_dict.copy().keys(): __lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: __lowercase = key.split("." ) __lowercase = int(key_split[1] ) __lowercase = int(key_split[3] ) __lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowercase = val[:dim, :] __lowercase = val[dim : dim * 2, :] __lowercase = val[-dim:, :] else: __lowercase = val[:dim] __lowercase = val[ dim : dim * 2 ] __lowercase = val[-dim:] else: __lowercase = val return orig_state_dict def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() __lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE ) __lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() __lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) __lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) __lowercase = timm_model(inputs["pixel_values"] ) __lowercase = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub( repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case__ : str = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
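# A condensed, self-contained sketch of two of the renaming rules applied by
# the checkpoint-conversion helper above (original name `rename_key`); the
# full function handles many more patterns (attention, MLP, norms, ...).
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name


print(rename_key("patch_embed.proj.weight"))
# swinv2.embeddings.patch_embeddings.projection.weight
print(rename_key("head.weight"))
# classifier.weight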
655
0
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record snake_case__ : List[Any] = """\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } """ snake_case__ : Optional[int] = """\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. """ snake_case__ : Optional[int] = """ Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for 'record': list of question-answer dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'prediction_text': the predicted answer text - for 'multirc': list of question-answer dictionaries with the following keys: - 'idx': index of the question-answer pair as specified by the dataset - 'prediction': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for 'record': list of question-answers dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'answers': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for 'record': - 'exact_match': Exact match between answer and gold answer - 'f1': F1 score - for 'multirc': - 'exact_match': Exact match between answer and gold answer - 'f1_m': Per-question macro-F1 score - 'f1_a': Average F1 score over all answers - for 'axb': 'matthews_correlation': Matthew Correlation - for 'cb': - 'accuracy': Accuracy - 'f1': F1 score - for all others: - 'accuracy': Accuracy Examples: >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'cb') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'record') >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}] >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc') >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'axb') >>> 
references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return float((preds == labels).mean() ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="binary" ): __lowercase = simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = float(fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=_SCREAMING_SNAKE_CASE , average=_SCREAMING_SNAKE_CASE ) ) return { "accuracy": acc, "f1": fa, } def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = {} for id_pred, label in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" __lowercase = id_pred["prediction"] if question_id in question_map: question_map[question_id].append((pred, label) ) else: __lowercase = [(pred, label)] __lowercase , __lowercase = [], [] for question, preds_labels in question_map.items(): __lowercase , __lowercase = zip(*_SCREAMING_SNAKE_CASE ) __lowercase = fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=_SCREAMING_SNAKE_CASE , average="macro" ) fas.append(_SCREAMING_SNAKE_CASE ) __lowercase = int(sum(pred == label for pred, label in preds_labels ) == len(_SCREAMING_SNAKE_CASE ) ) ems.append(_SCREAMING_SNAKE_CASE ) __lowercase = float(sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) ) __lowercase = sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) __lowercase = float(fa_score(y_true=_SCREAMING_SNAKE_CASE , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): '''simple docstring''' def _snake_case ( self : Tuple ): '''simple docstring''' if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , ) def _snake_case ( self : Tuple ): '''simple docstring''' if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "prediction_text": datasets.Value("string" ), }, "references": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "answers": datasets.Sequence(datasets.Value("string" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("int64" ), "paragraph": datasets.Value("int64" ), "question": datasets.Value("int64" ), }, "prediction": datasets.Value("int64" ), }, "references": datasets.Value("int64" ), } else: return { "predictions": datasets.Value("int64" ), "references": datasets.Value("int64" ), } def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict ): '''simple docstring''' if self.config_name == "axb": return 
{"matthews_correlation": matthews_corrcoef(lowerCamelCase , lowerCamelCase )} elif self.config_name == "cb": return acc_and_fa(lowerCamelCase , lowerCamelCase , fa_avg="macro" ) elif self.config_name == "record": __lowercase = [ { "qas": [ {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]} for ref in references ] } ] __lowercase = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions} return evaluate_record(lowerCamelCase , lowerCamelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(lowerCamelCase , lowerCamelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(lowerCamelCase , lowerCamelCase )} else: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
710
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : List[str] = logging.get_logger(__name__) snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } snake_case__ : List[str] = { """allenai/led-base-16384""": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def snake_case_ ( ): __lowercase = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowercase = bs[:] __lowercase = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 __lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase = char return pairs class _A ( _lowercase ): '''simple docstring''' _snake_case : List[str] = VOCAB_FILES_NAMES _snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle: __lowercase = json.load(lowerCamelCase ) __lowercase = {v: k for k, v in self.encoder.items()} __lowercase = errors # how to handle errors in decoding __lowercase = bytes_to_unicode() __lowercase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase , encoding="utf-8" ) as merges_handle: __lowercase = merges_handle.read().split("\n" )[1:-1] __lowercase = [tuple(merge.split() ) for merge in bpe_merges] __lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) __lowercase = {} __lowercase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _snake_case ( self : Optional[int] ): '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[int] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , lowerCamelCase : str ): '''simple docstring''' if token in self.cache: return self.cache[token] __lowercase = tuple(lowerCamelCase ) __lowercase = get_pairs(lowerCamelCase ) if not pairs: return token while True: __lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowercase , __lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(lowerCamelCase ): try: __lowercase = word.index(lowerCamelCase , lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase = j if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(lowerCamelCase ) __lowercase = new_word if len(lowerCamelCase ) == 1: break else: __lowercase = get_pairs(lowerCamelCase ) __lowercase = " ".join(lowerCamelCase ) __lowercase = word return word def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ): '''simple docstring''' __lowercase = [] for token in re.findall(self.pat , lowerCamelCase ): __lowercase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : str , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.decoder.get(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = "".join(lowerCamelCase ) __lowercase = bytearray([self.byte_decoder[c] for c in text] 
).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" ) __lowercase = 0 with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowercase = token_index writer.write(" ".join(lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ): '''simple docstring''' __lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()): __lowercase = " " + text return (text, kwargs) def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' __lowercase = super()._pad( encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: __lowercase = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __lowercase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` 
needs to have the same length as other (sequential) inputs. __lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase ) if needs_to_be_padded: __lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __lowercase = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __lowercase = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
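# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Demonstrates the padding rule implemented by `_pad` above: when `input_ids` are
# padded, `global_attention_mask` is padded with -1 on the same side, because 0
# already means "local attention" rather than "not to attend".  Pure-Python
# mock-up; no tokenizer instance is required, all values below are made up.
if __name__ == "__main__":
    input_ids = [0, 713, 16, 10, 2, 1, 1, 1]  # right-padded to length 8
    global_attention_mask = [1, 0, 0, 0, 0]  # global attention on <s> only
    difference = len(input_ids) - len(global_attention_mask)
    global_attention_mask = global_attention_mask + [-1] * difference
    assert len(global_attention_mask) == len(input_ids)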
655
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class _A ( _lowercase ):
    '''simple docstring'''

    _snake_case : Union[str, Any] = """naver-clova-ix/donut-base-finetuned-docvqa"""
    _snake_case : List[Any] = (
        """This is a tool that answers a question about a document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    _snake_case : Tuple = """document_qa"""
    _snake_case : Tuple = AutoProcessor
    _snake_case : int = VisionEncoderDecoderModel
    _snake_case : Dict = ["""image""", """text"""]
    _snake_case : Any = ["""text"""]

    def __init__( self : Tuple , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )

        super().__init__(*lowerCamelCase , **lowerCamelCase )

    def _snake_case ( self : List[Any] , lowerCamelCase : "Image" , lowerCamelCase : str ):
        '''simple docstring'''
        __lowercase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        __lowercase = task_prompt.replace("{user_input}" , lowerCamelCase )
        __lowercase = self.pre_processor.tokenizer(
            lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="pt" ).input_ids
        __lowercase = self.pre_processor(lowerCamelCase , return_tensors="pt" ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def _snake_case ( self : List[str] , lowerCamelCase : int ):
        '''simple docstring'''
        return self.model.generate(
            inputs["pixel_values"].to(self.device ) ,
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) ,
            max_length=self.model.decoder.config.max_position_embeddings ,
            early_stopping=lowerCamelCase ,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id ,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id ,
            use_cache=lowerCamelCase ,
            num_beams=1 ,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,
            return_dict_in_generate=lowerCamelCase ,
        ).sequences

    def _snake_case ( self : str , lowerCamelCase : List[Any] ):
        '''simple docstring'''
        __lowercase = self.pre_processor.batch_decode(lowerCamelCase )[0]
        __lowercase = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
        __lowercase = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
        __lowercase = re.sub(R"<.*?>" , "" , lowerCamelCase , count=1 ).strip()  # remove first task start token
        __lowercase = self.pre_processor.tokenajson(lowerCamelCase )

        return sequence["answer"]
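# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# `_A` above is the document question answering tool.  This sketch assumes the
# pre-renaming version of the file (the dataset's renaming leaves some internal
# names unresolved); instantiating it downloads the Donut checkpoint named in
# its attributes, and the image path below is hypothetical.
if __name__ == "__main__":
    from PIL import Image

    tool = _A()
    document = Image.open("invoice.png")  # hypothetical local scan of a document
    print(tool(document, "What is the total amount?"))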
711
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
        raise ValueError("The length of profit and weight must be the same." )
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit cannot be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight cannot be negative." )

    # List created to store the profit gained per 1 kg for each weight
    # respectively. Calculate and append profit/weight for each element.
    __lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]

    # Creating a copy of the list and sorting profit/weight in ascending order
    __lowercase = sorted(_SCREAMING_SNAKE_CASE )

    # declaring useful variables
    __lowercase = len(_SCREAMING_SNAKE_CASE )
    __lowercase = 0
    __lowercase = 0
    __lowercase = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) and i < length
    while limit <= max_weight and i < length:
        # flag value for the greatest element encountered in sorted_profit_by_weight
        __lowercase = sorted_profit_by_weight[length - i - 1]
        __lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
        __lowercase = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the full profit for this item, since
            # 1 == weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take the
            # required number of remaining kgs and calculate the profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        """Input profits, weights, and then max_weight (all positive ints) separated by """
        """spaces."""
    )

    snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))

    # Function Call
    calc_profit(profit, weight, max_weight)
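# --- Hedged worked example (added for illustration; not part of the original file) ---
# Ratios for profits [10, 9, 12] and weights [3, 2, 4] are about 3.33, 4.5 and
# 3.0, so the greedy order is item 1 (profit 9, 2 kg) then item 0 (profit 10,
# 3 kg), filling max_weight=5 exactly for a gain of 19.0.  Assumes the
# pre-renaming version of the file, where the function above is `calc_profit`,
# the name its own `__main__` block uses.
if __name__ == "__main__":
    assert calc_profit([10, 9, 12], [3, 2, 4], 5) == 19.0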
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE = 1_0_0_0_0_0_0 ):
    __lowercase = set(range(3 , _SCREAMING_SNAKE_CASE , 2 ) )
    primes.add(2 )
    for p in range(3 , _SCREAMING_SNAKE_CASE , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , _SCREAMING_SNAKE_CASE , p ) ) )

    __lowercase = [float(n ) for n in range(limit + 1 )]

    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:] ) )


if __name__ == "__main__":
    print(F'''{solution() = }''')
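# --- Hedged sanity check (added for illustration; not part of the original file) ---
# The sieve above computes sum(phi(n)) for 2 <= n <= limit (Project Euler 72,
# counting reduced proper fractions).  For a small limit it can be cross-checked
# against a direct gcd-based phi; this block is deliberately self-contained.
if __name__ == "__main__":
    from math import gcd

    limit = 20
    brute_force = sum(
        sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, limit + 1)
    )
    assert brute_force == 127  # the sieve above yields the same value for limit=20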
712
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = """openai/whisper-base""" _snake_case : Union[str, Any] = ( """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """ """transcribed text.""" ) _snake_case : Any = """transcriber""" _snake_case : Any = WhisperProcessor _snake_case : Optional[int] = WhisperForConditionalGeneration _snake_case : str = ["""audio"""] _snake_case : Optional[int] = ["""text"""] def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features def _snake_case ( self : str , lowerCamelCase : List[Any] ): '''simple docstring''' return self.model.generate(inputs=lowerCamelCase ) def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
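# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# `_A` above is the transcriber tool; this assumes the pre-renaming version of
# the file (some internal names here are left unresolved by the dataset's
# renaming).  Instantiating it downloads openai/whisper-base; one second of
# 16 kHz silence stands in for real audio.
if __name__ == "__main__":
    import numpy as np

    tool = _A()
    audio = np.zeros(16_000, dtype=np.float32)
    print(tool(audio))  # likely an empty or trivial transcript for silence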
655
0
from __future__ import annotations


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase , __lowercase = position
    __lowercase = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    __lowercase = []

    for position in positions:
        __lowercase , __lowercase = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(_SCREAMING_SNAKE_CASE )

    return permissible_positions


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    return not any(elem == 0 for row in board for elem in row )


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    if is_complete(_SCREAMING_SNAKE_CASE ):
        return True

    for position in get_valid_pos(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ):
        __lowercase , __lowercase = position

        if board[y][x] == 0:
            __lowercase = curr + 1
            if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , curr + 1 ):
                return True
            __lowercase = 0

    return False


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = [[0 for i in range(_SCREAMING_SNAKE_CASE )] for j in range(_SCREAMING_SNAKE_CASE )]

    for i in range(_SCREAMING_SNAKE_CASE ):
        for j in range(_SCREAMING_SNAKE_CASE ):
            __lowercase = 1
            if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , (i, j) , 1 ):
                return board
            __lowercase = 0

    __lowercase = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
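# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assumes the pre-renaming names of the functions above (`get_valid_pos`,
# `is_complete`, `open_knight_tour_helper`, and `open_knight_tour` for the last
# one); the dataset's renaming collapses them all to `snake_case_`.  A 5x5 board
# admits an open knight's tour, so every visit number 1..25 appears exactly once.
if __name__ == "__main__":
    board = open_knight_tour(5)
    assert sorted(cell for row in board for cell in row) == list(range(1, 26))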
713
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _A : '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, 
"image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : str ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["prompt"] __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] if "image" in inputs: __lowercase = inputs["image"] else: __lowercase = None if "mask_image" in inputs: __lowercase = inputs["mask_image"] else: __lowercase = None if "original_image" in inputs: __lowercase = inputs["original_image"] else: __lowercase = None __lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase ) # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - 
to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 )
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = 0
    while num > 0:
        digit_sum += num % 1_0
        num //= 1_0
    return digit_sum


def snake_case_ ( _SCREAMING_SNAKE_CASE = 1_0_0 ):
    __lowercase = 1
    __lowercase = 2

    for i in range(2 , max_n + 1 ):
        __lowercase = pre_numerator
        __lowercase = 2 * i // 3 if i % 3 == 0 else 1
        __lowercase = cur_numerator
        __lowercase = e_cont * pre_numerator + temp

    return sum_digits(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    print(F'''{solution() = }''')
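# --- Hedged worked example (added for illustration; not part of the original file) ---
# The second function above walks the continued fraction of e,
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] (Project Euler 65).  Its 10th convergent is
# 1457/536, so the answer for max_n=10 is the digit sum of 1457, namely 17.
# This check is deliberately self-contained.
if __name__ == "__main__":
    assert sum(int(digit) for digit in str(1457)) == 17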
714
import numpy as np

snake_case__ : Tuple = [
    ["""a""", """b""", """c""", """d""", """e"""],
    ["""f""", """g""", """h""", """i""", """k"""],
    ["""l""", """m""", """n""", """o""", """p"""],
    ["""q""", """r""", """s""", """t""", """u"""],
    ["""v""", """w""", """x""", """y""", """z"""],
]


class _A :
    '''simple docstring'''

    def __init__( self : Dict ):
        '''simple docstring'''
        __lowercase = np.array(lowerCamelCase )

    def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
        '''simple docstring'''
        __lowercase , __lowercase = np.where(letter == self.SQUARE )
        __lowercase = np.concatenate([indexa + 1, indexa + 1] )
        return indexes

    def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ):
        '''simple docstring'''
        __lowercase = self.SQUARE[indexa - 1, indexa - 1]
        return letter

    def _snake_case ( self : int , lowerCamelCase : str ):
        '''simple docstring'''
        __lowercase = message.lower()
        __lowercase = message.replace(" " , "" )
        __lowercase = message.replace("j" , "i" )

        __lowercase = np.empty((2, len(lowerCamelCase )) )
        for letter_index in range(len(lowerCamelCase ) ):
            __lowercase = self.letter_to_numbers(message[letter_index] )
            __lowercase = numbers[0]
            __lowercase = numbers[1]

        __lowercase = first_step.reshape(2 * len(lowerCamelCase ) )
        __lowercase = ""
        for numbers_index in range(len(lowerCamelCase ) ):
            __lowercase = int(second_step[numbers_index * 2] )
            __lowercase = int(second_step[(numbers_index * 2) + 1] )
            __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
            __lowercase = encoded_message + letter

        return encoded_message

    def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
        '''simple docstring'''
        __lowercase = message.lower()
        __lowercase = message.replace(" " , "" )

        __lowercase = np.empty(2 * len(lowerCamelCase ) )
        for letter_index in range(len(lowerCamelCase ) ):
            __lowercase = self.letter_to_numbers(message[letter_index] )
            __lowercase = numbers[0]
            __lowercase = numbers[1]

        __lowercase = first_step.reshape((2, len(lowerCamelCase )) )
        __lowercase = ""
        for numbers_index in range(len(lowerCamelCase ) ):
            __lowercase = int(second_step[0, numbers_index] )
            __lowercase = int(second_step[1, numbers_index] )
            __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
            __lowercase = decoded_message + letter

        return decoded_message
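# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assumes the pre-renaming names of the methods above (`encode`/`decode`); the
# dataset's renaming collapses them all to `_snake_case`.  The 5x5 square has no
# "j" (it is mapped to "i") and spaces are stripped, so the round trip below
# returns the space-free text.
if __name__ == "__main__":
    square = _A()
    assert square.decode(square.encode("test message")) == "testmessage"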
655
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ : Any = logging.get_logger(__name__) class _A ( _lowercase , _lowercase ): '''simple docstring''' _snake_case : Dict = """maskformer-swin""" _snake_case : List[str] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = embed_dim __lowercase = depths __lowercase = len(lowerCamelCase ) __lowercase = num_heads __lowercase = window_size __lowercase = mlp_ratio __lowercase = qkv_bias __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = drop_path_rate __lowercase = hidden_act __lowercase = use_absolute_embeddings __lowercase = layer_norm_eps __lowercase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) ) __lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
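# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# This file mirrors `MaskFormerSwinConfig` from transformers; the upstream class
# is used here because the dataset's renaming makes the local one uninstantiable.
if __name__ == "__main__":
    from transformers import MaskFormerSwinConfig

    config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    print(config.hidden_size)  # 768 == 96 * 2 ** (len(depths) - 1), the last-stage width
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']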
715
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class _A ( ctypes.Structure ):
    '''simple docstring'''

    _snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]


def snake_case_ ( ):
    if os.name == "nt":
        __lowercase = CursorInfo()
        __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
        __lowercase = False
        ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25l" )
        sys.stdout.flush()


def snake_case_ ( ):
    if os.name == "nt":
        __lowercase = CursorInfo()
        __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
        __lowercase = True
        ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25h" )
        sys.stdout.flush()


@contextmanager
def snake_case_ ( ):
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
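# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# `hide_cursor` and `show_cursor` are the names the context manager above calls
# (its own defs are renamed to `snake_case_` by the dataset); this sketch assumes
# the pre-renaming version and shows the try/finally pattern the context manager
# wraps.
if __name__ == "__main__":
    import time

    hide_cursor()
    try:
        time.sleep(1)  # terminal cursor is hidden while work happens
    finally:
        show_cursor()  # always restored, even if the work raises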
655
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = SwinvaConfig() __lowercase = swinva_name.split("_" ) __lowercase = name_split[1] if "to" in name_split[3]: __lowercase = int(name_split[3][-3:] ) else: __lowercase = int(name_split[3] ) if "to" in name_split[2]: __lowercase = int(name_split[2][-2:] ) else: __lowercase = int(name_split[2][6:] ) if model_size == "tiny": __lowercase = 9_6 __lowercase = (2, 2, 6, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "small": __lowercase = 9_6 __lowercase = (2, 2, 1_8, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "base": __lowercase = 1_2_8 __lowercase = (2, 2, 1_8, 2) __lowercase = (4, 8, 1_6, 3_2) else: __lowercase = 1_9_2 __lowercase = (2, 2, 1_8, 2) __lowercase = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: __lowercase = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __lowercase = 2_1_8_4_1 __lowercase = "huggingface/label-files" __lowercase = "imagenet-22k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} else: __lowercase = 1_0_0_0 __lowercase = "huggingface/label-files" __lowercase = "imagenet-1k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} __lowercase = img_size __lowercase = num_classes __lowercase = embed_dim __lowercase = depths __lowercase = num_heads __lowercase = window_size return config def snake_case_ ( _SCREAMING_SNAKE_CASE ): if "patch_embed.proj" in name: __lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowercase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowercase = "encoder." + name if "attn.proj" in name: __lowercase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __lowercase = name.replace("attn" , "attention.self" ) if "norm1" in name: __lowercase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __lowercase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __lowercase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __lowercase = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: __lowercase = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: __lowercase = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: __lowercase = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: __lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": __lowercase = "layernorm.weight" if name == "norm.bias": __lowercase = "layernorm.bias" if "head" in name: __lowercase = name.replace("head" , "classifier" ) else: __lowercase = "swinv2." 
+ name return name def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for key in orig_state_dict.copy().keys(): __lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: __lowercase = key.split("." ) __lowercase = int(key_split[1] ) __lowercase = int(key_split[3] ) __lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowercase = val[:dim, :] __lowercase = val[dim : dim * 2, :] __lowercase = val[-dim:, :] else: __lowercase = val[:dim] __lowercase = val[ dim : dim * 2 ] __lowercase = val[-dim:] else: __lowercase = val return orig_state_dict def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() __lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE ) __lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() __lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) __lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) __lowercase = timm_model(inputs["pixel_values"] ) __lowercase = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub( repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case__ : str = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
716
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : List[str] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[Any] = """yolos""" def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = qkv_bias __lowercase = num_detection_tokens __lowercase = use_mid_position_embeddings __lowercase = auxiliary_loss # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = eos_coefficient class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = version.parse("""1.11""" ) @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : str ): '''simple docstring''' return 1e-4 @property def _snake_case ( self : Tuple ): '''simple docstring''' return 12
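# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# This file mirrors `YolosConfig` from transformers; the upstream class is used
# because the dataset's renaming makes the local one uninstantiable.
if __name__ == "__main__":
    from transformers import YolosConfig

    config = YolosConfig()
    print(config.num_detection_tokens)  # 100 detection tokens appended to the patch tokens
    print(config.image_size)            # [512, 864] by default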
655
0
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging snake_case__ : Dict = logging.get_logger(__name__) class _A ( _lowercase ): '''simple docstring''' _snake_case : Tuple = ["""input_features""", """attention_mask"""] def __init__( self : Any , lowerCamelCase : List[str]=80 , lowerCamelCase : Optional[int]=16_000 , lowerCamelCase : List[str]=80 , lowerCamelCase : Dict=0.0 , lowerCamelCase : int=True , lowerCamelCase : List[Any]=True , lowerCamelCase : Dict=True , **lowerCamelCase : List[str] , ): '''simple docstring''' super().__init__(feature_size=lowerCamelCase , sampling_rate=lowerCamelCase , padding_value=lowerCamelCase , **lowerCamelCase ) __lowercase = num_mel_bins __lowercase = do_ceptral_normalize __lowercase = normalize_means __lowercase = normalize_vars __lowercase = True def _snake_case ( self : Any , lowerCamelCase : np.ndarray , ): '''simple docstring''' __lowercase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __lowercase = torch.from_numpy(lowerCamelCase ).unsqueeze(0 ) __lowercase = ta_kaldi.fbank(lowerCamelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _snake_case ( lowerCamelCase : np.ndarray , lowerCamelCase : int , lowerCamelCase : Optional[bool] = True , lowerCamelCase : Optional[bool] = True , lowerCamelCase : float = 0.0 , ): '''simple docstring''' if normalize_means: __lowercase = x[:input_length].mean(axis=0 ) __lowercase = np.subtract(lowerCamelCase , lowerCamelCase ) if normalize_vars: __lowercase = x[:input_length].std(axis=0 ) __lowercase = np.divide(lowerCamelCase , lowerCamelCase ) if input_length < x.shape[0]: __lowercase = padding_value # make sure array is in float32 __lowercase = x.astype(np.floataa ) return x def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[np.ndarray] , lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __lowercase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(lowerCamelCase , lowerCamelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(lowerCamelCase , lowerCamelCase ) ] def __call__( self : Dict , lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase : Union[bool, str, PaddingStrategy] = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : bool = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , **lowerCamelCase : Dict , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) __lowercase = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __lowercase = is_batched_numpy or ( isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowercase = [np.asarray(lowerCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ): __lowercase = np.asarray(lowerCamelCase , dtype=np.floataa ) elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowercase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowercase = [raw_speech] # extract fbank features __lowercase = [self._extract_fbank_features(lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __lowercase = BatchFeature({"input_features": features} ) __lowercase = self.pad( lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , truncation=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , ) # make sure list is in array format __lowercase = padded_inputs.get("input_features" ) if isinstance(input_features[0] , lowerCamelCase ): __lowercase = [np.asarray(lowerCamelCase , dtype=np.floataa ) for feature in input_features] __lowercase = padded_inputs.get("attention_mask" ) if attention_mask is not None: __lowercase = [np.asarray(lowerCamelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __lowercase = ( np.array(lowerCamelCase , dtype=np.intaa ) if self._get_padding_strategies(lowerCamelCase , max_length=lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __lowercase = self.normalize( padded_inputs["input_features"] , attention_mask=lowerCamelCase ) if return_tensors is not None: __lowercase = padded_inputs.convert_to_tensors(lowerCamelCase ) return padded_inputs
717
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Optional[int] = logging.get_logger(__name__) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError("Quantized models are not supported." ) __lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE ) if matches: __lowercase = float(matches[1] ) __lowercase = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __lowercase = 1_0_0_1 __lowercase = "imagenet-1k-id2label.json" __lowercase = "huggingface/label-files" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()} __lowercase = "background" __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} return config def snake_case_ ( ): __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE ) # Load 🤗 model __lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __lowercase = MobileNetVaImageProcessor( crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , ) __lowercase = image_processor(images=prepare_img() , return_tensors="pt" ) __lowercase = model(**_SCREAMING_SNAKE_CASE ) __lowercase = outputs.logits assert logits.shape == (1, 1_0_0_1) if model_name == "mobilenet_v1_1.0_224": __lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": __lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: __lowercase = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("Pushing to the hub..." ) __lowercase = "google/" + model_name image_processor.push_to_hub(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""mobilenet_v1_1.0_224""", type=str, help="""Name of the MobileNetV1 model you'd like to convert. 
Should be in the form 'mobilenet_v1_<depth>_<size>'.""", ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ : Dict = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
655
0
import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : int = LayoutLMTokenizer _snake_case : List[str] = LayoutLMTokenizerFast _snake_case : List[str] = True _snake_case : Tuple = True def _snake_case ( self : Optional[int] ): '''simple docstring''' super().setUp() __lowercase = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _snake_case ( self : Dict , **lowerCamelCase : Union[str, Any] ): '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase ) def _snake_case ( self : Dict , lowerCamelCase : Any ): '''simple docstring''' __lowercase = "UNwant\u00E9d,running" __lowercase = "unwanted, running" return input_text, output_text def _snake_case ( self : str ): '''simple docstring''' __lowercase = self.tokenizer_class(self.vocab_file ) __lowercase = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [7, 4, 5, 10, 8, 9] ) def _snake_case ( self : Dict ): '''simple docstring''' pass
718
from __future__ import annotations from typing import Any class _A : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def _snake_case ( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCamelCase ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(lowerCamelCase ) component_size[u_node] += component_size[v_node] self.set_component(lowerCamelCase ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase ) print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def snake_case_ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
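# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assumes the pre-renaming method names of the graph class above (`add_edge` and
# `boruvka`); the dataset's renaming collapses them all to `_snake_case`.  For
# the 4-node cycle below, Boruvka's algorithm drops the heaviest edge (weight 4),
# leaving a minimum spanning tree of total weight 6.
if __name__ == "__main__":
    graph = _A(4)
    graph.add_edge(0, 1, 1)
    graph.add_edge(1, 2, 2)
    graph.add_edge(2, 3, 3)
    graph.add_edge(3, 0, 4)
    graph.boruvka()  # prints each added edge and "The total weight ... is: 6"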
655
0
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants snake_case__ : List[Any] = Mapping[str, np.ndarray] snake_case__ : Any = Mapping[str, Any] # Is a nested dict. snake_case__ : Any = 0.0_1 @dataclasses.dataclass(frozen=_lowercase ) class _A : '''simple docstring''' _snake_case : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. _snake_case : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. _snake_case : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. _snake_case : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. _snake_case : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions _snake_case : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files _snake_case : Optional[str] = None # Templates used to generate this protein (prediction-only) _snake_case : Optional[Sequence[str]] = None # Chain corresponding to each parent _snake_case : Optional[Sequence[int]] = None def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = R"(\[[A-Z]+\]\n)" __lowercase = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0] __lowercase = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] ) __lowercase = ["N", "CA", "C"] __lowercase = None __lowercase = None __lowercase = None for g in groups: if "[PRIMARY]" == g[0]: __lowercase = g[1][0].strip() for i in range(len(_SCREAMING_SNAKE_CASE ) ): if seq[i] not in residue_constants.restypes: __lowercase = "X" # FIXME: strings are immutable __lowercase = np.array( [residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __lowercase = [] for axis in range(3 ): tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) ) __lowercase = np.array(_SCREAMING_SNAKE_CASE ) __lowercase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __lowercase = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) ) __lowercase = np.zeros( ( len(_SCREAMING_SNAKE_CASE ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ): __lowercase = [] __lowercase = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) __lowercase = prot.parents __lowercase = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __lowercase = [p for i, p in 
zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id] if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0: __lowercase = ["N/A"] pdb_headers.append(F"""PARENT {' '.join(_SCREAMING_SNAKE_CASE )}""" ) return pdb_headers def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = [] __lowercase = pdb_str.split("\n" ) __lowercase = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) __lowercase = 4_2 if prot.parents is not None and len(prot.parents ) > 0: __lowercase = [] if prot.parents_chain_index is not None: __lowercase = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] ) parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE ) __lowercase = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __lowercase = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ["N/A"] ) parents_per_chain.append(_SCREAMING_SNAKE_CASE ) else: parents_per_chain.append(list(prot.parents ) ) else: __lowercase = [["N/A"]] def make_parent_line(_SCREAMING_SNAKE_CASE ) -> str: return F"""PARENT {' '.join(_SCREAMING_SNAKE_CASE )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __lowercase = 0 for i, l in enumerate(_SCREAMING_SNAKE_CASE ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(_SCREAMING_SNAKE_CASE ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(_SCREAMING_SNAKE_CASE ): __lowercase = parents_per_chain[chain_counter] else: __lowercase = ["N/A"] out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) ) return "\n".join(_SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = residue_constants.restypes + ["X"] def res_atoa(_SCREAMING_SNAKE_CASE ) -> str: return residue_constants.restype_atoa.get(restypes[r] , "UNK" ) __lowercase = residue_constants.atom_types __lowercase = [] __lowercase = prot.atom_mask __lowercase = prot.aatype __lowercase = prot.atom_positions __lowercase = prot.residue_index.astype(np.intaa ) __lowercase = prot.b_factors __lowercase = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("Invalid aatypes." ) __lowercase = get_pdb_headers(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: pdb_lines.extend(_SCREAMING_SNAKE_CASE ) __lowercase = aatype.shape[0] __lowercase = 1 __lowercase = 0 __lowercase = string.ascii_uppercase __lowercase = None # Add all atom sites. for i in range(_SCREAMING_SNAKE_CASE ): __lowercase = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __lowercase = "ATOM" __lowercase = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F""" {atom_name}""" __lowercase = "" __lowercase = "" __lowercase = 1.0_0 __lowercase = atom_name[0] # Protein supports only C, N, O, S, this works. __lowercase = "" __lowercase = "A" if chain_index is not None: __lowercase = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
__lowercase = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 __lowercase = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __lowercase = True __lowercase = chain_index[i + 1] if should_terminate: # Close the chain. __lowercase = "TER" __lowercase = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(_SCREAMING_SNAKE_CASE ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) pdb_lines.append("END" ) pdb_lines.append("" ) return "\n".join(_SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ): return Protein( aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
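# A hedged, self-contained sketch of the ATOM record assembled above: it is a
# fixed-width, 80-column PDB line where every space matters. The sample values
# below are made up for illustration; the field names mirror the variables used
# in the writer above.
record_type = "ATOM"
atom_index = 1
name = " CA"          # names shorter than 4 chars get a leading space
alt_loc = ""
res_name = "MET"
chain_tag = "A"
res_index = 1
insertion_code = ""
x, y, z = 11.104, 6.134, -6.504
occupancy, b_factor = 1.00, 0.00
element, charge = "C", ""
atom_line = (
    f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
    f"{res_name:>3} {chain_tag:>1}"
    f"{res_index:>4}{insertion_code:>1}   "
    f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
    f"{occupancy:>6.2f}{b_factor:>6.2f}          "
    f"{element:>2}{charge:>2}"
)
assert len(atom_line) == 80  # PDB records are padded out to exactly 80 columns
print(atom_line)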
719
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : List[str] = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
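# A minimal sketch of the lazy-import idea behind _LazyModule: attribute access
# on the package resolves submodule members only on first use, via a
# module-level __getattr__ (PEP 562). This is an illustration of the pattern,
# not the actual transformers implementation; "math_utils" and "sqrt_table"
# are hypothetical names.
import importlib

_import_structure = {"math_utils": ["sqrt_table"]}

def __getattr__(name):  # only takes effect when placed in a package __init__.py
    for submodule, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")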
655
0
import os from collections import deque import torch from torch.utils.data import Dataset class _A ( _lowercase ): '''simple docstring''' def __init__( self : List[Any] , lowerCamelCase : Dict="" , lowerCamelCase : int="train" ): '''simple docstring''' assert os.path.isdir(lowerCamelCase ) __lowercase = [] __lowercase = os.listdir(lowerCamelCase ) for story_filename in story_filenames_list: if "summary" in story_filename: continue __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) if not os.path.isfile(lowerCamelCase ): continue self.documents.append(lowerCamelCase ) def __len__( self : Dict ): '''simple docstring''' return len(self.documents ) def __getitem__( self : int , lowerCamelCase : Tuple ): '''simple docstring''' __lowercase = self.documents[idx] __lowercase = document_path.split("/" )[-1] with open(lowerCamelCase , encoding="utf-8" ) as source: __lowercase = source.read() __lowercase , __lowercase = process_story(lowerCamelCase ) return document_name, story_lines, summary_lines def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = list(filter(lambda _SCREAMING_SNAKE_CASE : len(_SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) ) # for some unknown reason some lines miss a period, add it __lowercase = [_add_missing_period(_SCREAMING_SNAKE_CASE ) for line in nonempty_lines] # gather article lines __lowercase = [] __lowercase = deque(_SCREAMING_SNAKE_CASE ) while True: try: __lowercase = lines.popleft() if element.startswith("@highlight" ): break story_lines.append(_SCREAMING_SNAKE_CASE ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines __lowercase = list(filter(lambda _SCREAMING_SNAKE_CASE : not t.startswith("@highlight" ) , _SCREAMING_SNAKE_CASE ) ) return story_lines, summary_lines def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"] if line.startswith("@highlight" ): return line if line[-1] in END_TOKENS: return line return line + "." def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(_SCREAMING_SNAKE_CASE )) ) return sequence def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = torch.ones_like(_SCREAMING_SNAKE_CASE ) __lowercase = sequence == pad_token_id __lowercase = 0 return mask def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in story_lines] __lowercase = [token for sentence in story_lines_token_ids for token in sentence] __lowercase = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in summary_lines] __lowercase = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = [] for sequence in batch: __lowercase = -1 __lowercase = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(_SCREAMING_SNAKE_CASE ) return torch.tensor(_SCREAMING_SNAKE_CASE )
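# A quick illustration of the story splitter above, shown with its original
# name process_story (which the dataset class's __getitem__ also calls):
# article lines are gathered until the first "@highlight", the rest become
# summary lines, and missing sentence-final periods are added. Toy input only.
raw_story = (
    "The quick brown fox jumped over the lazy dog\n\n"
    "It was seen again the next day.\n\n"
    "@highlight\n\nfox jumps over dog"
)
story_lines, summary_lines = process_story(raw_story)
print(story_lines)    # ['The quick brown fox jumped over the lazy dog.', 'It was seen again the next day.']
print(summary_lines)  # ['fox jumps over dog.']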
720
from __future__ import annotations import bisect def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = 0 __lowercase = len(_SCREAMING_SNAKE_CASE ) - 1 while left <= right: __lowercase = left + (right - left) // 2 __lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __lowercase = midpoint - 1 else: __lowercase = midpoint + 1 return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item: return index return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if right < left: return None __lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 ) else: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip() snake_case__ : Any = sorted(int(item) for item in user_input.split(""",""")) snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n""")) snake_case__ : List[Any] = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
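# Usage sketch for the helpers above, shown with their original names
# (bisect_left / bisect_right / insort_left / binary_search), which the
# in-file call sites still reference.
data = [0, 5, 7, 10, 15]
print(bisect_left(data, 6))     # 2, leftmost insertion point keeping order
print(bisect_right(data, 5))    # 2, just past the existing 5
insort_left(data, 6)
print(data)                     # [0, 5, 6, 7, 10, 15]
print(binary_search(data, 6))   # 2
print(binary_search(data, 99))  # None, not present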
655
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : List[str] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[Any] = """yolos""" def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = qkv_bias __lowercase = num_detection_tokens __lowercase = use_mid_position_embeddings __lowercase = auxiliary_loss # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = eos_coefficient class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = version.parse("""1.11""" ) @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : str ): '''simple docstring''' return 1e-4 @property def _snake_case ( self : Tuple ): '''simple docstring''' return 12
721
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ : int = logging.get_logger(__name__) snake_case__ : Optional[int] = { """microsoft/conditional-detr-resnet-50""": ( """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json""" ), } class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = """conditional_detr""" _snake_case : Union[str, Any] = ["""past_key_values"""] _snake_case : Optional[int] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = backbone_config.get("model_type" ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(lowerCamelCase ) __lowercase = use_timm_backbone __lowercase = backbone_config __lowercase = num_channels __lowercase = num_queries __lowercase = d_model __lowercase = encoder_ffn_dim __lowercase = encoder_layers __lowercase = encoder_attention_heads __lowercase = decoder_ffn_dim __lowercase = decoder_layers __lowercase = decoder_attention_heads __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = activation_function __lowercase = init_std __lowercase = init_xavier_std __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = encoder_layers __lowercase = auxiliary_loss __lowercase = position_embedding_type __lowercase = backbone __lowercase = use_pretrained_backbone __lowercase = dilation # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = mask_loss_coefficient __lowercase = dice_loss_coefficient __lowercase = cls_loss_coefficient __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return self.encoder_attention_heads @property def _snake_case ( self : str ): '''simple docstring''' return self.d_model def _snake_case ( self : int ): '''simple docstring''' __lowercase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output class _A ( _lowercase ): '''simple docstring''' _snake_case : Any = version.parse("""1.11""" ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _snake_case ( self : Any ): '''simple docstring''' return 1e-5 @property def _snake_case ( self : Optional[Any] ): '''simple docstring''' return 12
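# Hedged sketch of the backbone-selection branches above (original class name:
# ConditionalDetrConfig). A dict-valued backbone_config is re-hydrated through
# CONFIG_MAPPING via from_dict, and combining a backbone_config with
# use_timm_backbone=True raises the ValueError shown in __init__.
from transformers import ConditionalDetrConfig

cfg = ConditionalDetrConfig(
    use_timm_backbone=False,
    backbone_config={"model_type": "resnet"},
)
print(type(cfg.backbone_config).__name__)  # ResNetConfig

try:
    ConditionalDetrConfig(use_timm_backbone=True, backbone_config={"model_type": "resnet"})
except ValueError as err:
    print(err)  # You can't specify both `backbone_config` and `use_timm_backbone`.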
655
0
'''simple docstring''' from __future__ import annotations import time snake_case__ : Tuple = list[tuple[int, int]] snake_case__ : Optional[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] snake_case__ : Any = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class _A : '''simple docstring''' def __init__( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Node | None ): '''simple docstring''' __lowercase = pos_x __lowercase = pos_y __lowercase = (pos_y, pos_x) __lowercase = goal_x __lowercase = goal_y __lowercase = parent class _A : '''simple docstring''' def __init__( self : int , lowerCamelCase : tuple[int, int] , lowerCamelCase : tuple[int, int] ): '''simple docstring''' __lowercase = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCamelCase ) __lowercase = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCamelCase ) __lowercase = [self.start] __lowercase = False def _snake_case ( self : Optional[int] ): '''simple docstring''' while self.node_queue: __lowercase = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: __lowercase = True return self.retrace_path(lowerCamelCase ) __lowercase = self.get_successors(lowerCamelCase ) for node in successors: self.node_queue.append(lowerCamelCase ) if not self.reached: return [self.start.pos] return None def _snake_case ( self : Dict , lowerCamelCase : Node ): '''simple docstring''' __lowercase = [] for action in delta: __lowercase = parent.pos_x + action[1] __lowercase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCamelCase , lowerCamelCase , self.target.pos_y , self.target.pos_x , lowerCamelCase ) ) return successors def _snake_case ( self : Any , lowerCamelCase : Node | None ): '''simple docstring''' __lowercase = node __lowercase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __lowercase = current_node.parent path.reverse() return path class _A : '''simple docstring''' def __init__( self : Any , lowerCamelCase : Any , lowerCamelCase : str ): '''simple docstring''' __lowercase = BreadthFirstSearch(lowerCamelCase , lowerCamelCase ) __lowercase = BreadthFirstSearch(lowerCamelCase , lowerCamelCase ) __lowercase = False def _snake_case ( self : Any ): '''simple docstring''' while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: __lowercase = self.fwd_bfs.node_queue.pop(0 ) __lowercase = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: __lowercase = True return self.retrace_bidirectional_path( lowerCamelCase , lowerCamelCase ) __lowercase = current_bwd_node __lowercase = current_fwd_node __lowercase = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(lowerCamelCase ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _snake_case ( self : int , lowerCamelCase : Node , lowerCamelCase : Node ): '''simple docstring''' __lowercase = self.fwd_bfs.retrace_path(lowerCamelCase ) __lowercase = self.bwd_bfs.retrace_path(lowerCamelCase ) bwd_path.pop() bwd_path.reverse() __lowercase = fwd_path + 
bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() snake_case__ : Dict = (0, 0) snake_case__ : List[Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) snake_case__ : Any = time.time() snake_case__ : str = BreadthFirstSearch(init, goal) snake_case__ : int = bfs.search() snake_case__ : List[str] = time.time() - start_bfs_time print("""Unidirectional BFS computation time : """, bfs_time) snake_case__ : List[Any] = time.time() snake_case__ : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal) snake_case__ : Union[str, Any] = bd_bfs.search() snake_case__ : Union[str, Any] = time.time() - start_bd_bfs_time print("""Bidirectional BFS computation time : """, bd_bfs_time)
700
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ : Any = logging.get_logger(__name__) class _A ( _lowercase , _lowercase ): '''simple docstring''' _snake_case : Dict = """maskformer-swin""" _snake_case : List[str] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = embed_dim __lowercase = depths __lowercase = len(lowerCamelCase ) __lowercase = num_heads __lowercase = window_size __lowercase = mlp_ratio __lowercase = qkv_bias __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = drop_path_rate __lowercase = hidden_act __lowercase = use_absolute_embeddings __lowercase = layer_norm_eps __lowercase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) ) __lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
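# Sanity check of the derived channel width set at the end of __init__ above:
# with the defaults embed_dim=96 and depths=[2, 2, 6, 2], the final stage of
# the Swin backbone exposes 96 * 2**(4 - 1) = 768 channels, which is what gets
# stored as hidden_size.
embed_dim, depths = 96, [2, 2, 6, 2]
print(int(embed_dim * 2 ** (len(depths) - 1)))  # 768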
655
0
import inspect import unittest class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : List[Any] ): '''simple docstring''' try: import diffusers # noqa: F401 except ImportError: assert False def _snake_case ( self : List[Any] ): '''simple docstring''' import diffusers from diffusers.dependency_versions_table import deps __lowercase = inspect.getmembers(lowerCamelCase , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": __lowercase = "k-diffusion" elif backend == "invisible_watermark": __lowercase = "invisible-watermark" assert backend in deps, f"""{backend} is not in the deps table!"""
701
def snake_case_ ( _SCREAMING_SNAKE_CASE ): # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError("The given input must be positive" ) # get the generated string sequence __lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE ) # # convert them to integers for i in range(len(_SCREAMING_SNAKE_CASE ) ): __lowercase = int(sequence[i] , 2 ) return sequence def snake_case_ ( _SCREAMING_SNAKE_CASE ): # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __lowercase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __lowercase = gray_code_sequence_string(bit_count - 1 ) __lowercase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __lowercase = "0" + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __lowercase = "1" + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
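# Example run of the Gray-code generator above. gray_code_sequence_string is
# the name confirmed by the recursive call; gray_code_sequence is assumed as
# the original name of the integer-returning wrapper. Adjacent codes differ in
# exactly one bit.
seq = gray_code_sequence(3)
print(seq)  # [0, 1, 3, 2, 6, 7, 5, 4]
for a, b in zip(seq, seq[1:]):
    assert bin(a ^ b).count("1") == 1  # Hamming distance of 1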
655
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _A ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Dict=3 , lowerCamelCase : Dict=18 , lowerCamelCase : Dict=30 , lowerCamelCase : Optional[Any]=400 , lowerCamelCase : Dict=True , lowerCamelCase : Any=None , lowerCamelCase : Optional[Any]=True , lowerCamelCase : str=None , lowerCamelCase : Any=True , lowerCamelCase : int=[0.5, 0.5, 0.5] , lowerCamelCase : Dict=[0.5, 0.5, 0.5] , ): '''simple docstring''' __lowercase = size if size is not None else {"shortest_edge": 18} __lowercase = crop_size if crop_size is not None else {"height": 18, "width": 18} __lowercase = parent __lowercase = batch_size __lowercase = num_channels __lowercase = image_size __lowercase = min_resolution __lowercase = max_resolution __lowercase = do_resize __lowercase = size __lowercase = do_center_crop __lowercase = crop_size __lowercase = do_normalize __lowercase = image_mean __lowercase = image_std def _snake_case ( self : Dict ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : Optional[int] = LevitImageProcessor if is_vision_available() else None def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = LevitImageProcessingTester(self ) @property def _snake_case ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) ) self.assertTrue(hasattr(lowerCamelCase , "size" ) ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) __lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' pass def _snake_case ( self : int ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input __lowercase = 
image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input __lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input __lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
702
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ): model.train() __lowercase = model(_SCREAMING_SNAKE_CASE ) __lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): set_seed(4_2 ) __lowercase = RegressionModel() __lowercase = deepcopy(_SCREAMING_SNAKE_CASE ) __lowercase = RegressionDataset(length=8_0 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) model.to(accelerator.device ) if sched: __lowercase = AdamW(params=model.parameters() , lr=1E-3 ) __lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 ) __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 ) __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 ) # Make a copy of `model` if sched: __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def snake_case_ ( _SCREAMING_SNAKE_CASE ): # Test when on a single CPU or GPU that the context manager does nothing __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) # Use a single batch __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] def snake_case_ ( _SCREAMING_SNAKE_CASE ): # Test on distributed setup that context manager behaves properly __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) # Use a single batch __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ): __lowercase = Accelerator( split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = batch.values() # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_SCREAMING_SNAKE_CASE 
): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] GradientState._reset_state() def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ): __lowercase = Accelerator( split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = batch.values() # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n""" __lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )) if accelerator.num_processes > 1: check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def snake_case_ ( ): __lowercase = Accelerator() __lowercase = RegressionDataset(length=8_0 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) __lowercase = RegressionDataset(length=9_6 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE ) if iteration < len(_SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE ) if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def snake_case_ ( ): __lowercase = Accelerator() __lowercase = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(_SCREAMING_SNAKE_CASE ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(_SCREAMING_SNAKE_CASE ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
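# A condensed sketch of the accumulation pattern these tests exercise, using
# the same accelerate API and test utilities imported above. The "x"/"y" batch
# keys are an assumption about RegressionDataset's item layout; everything
# else follows the calls made in the tests.
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    # gradients sync across processes (and the step takes effect) only on
    # every 2nd batch, per gradient_accumulation_steps=2
    with accelerator.accumulate(model):
        loss = F.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()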
655
0
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor snake_case__ : Tuple = logging.get_logger(__name__) class _A ( _lowercase ): '''simple docstring''' def __init__( self : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : str ): '''simple docstring''' warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." , lowerCamelCase , ) super().__init__(*lowerCamelCase , **lowerCamelCase )
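# The shim above follows a common deprecation pattern: subclass the
# replacement, warn on construction, inherit all behavior. A generic,
# self-contained sketch of the same idea; NewProcessor and OldFeatureExtractor
# are hypothetical names.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(size=128)  # warns once, then behaves like NewProcessor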
703
from ....utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) class _A ( _lowercase ): '''simple docstring''' def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ): '''simple docstring''' __lowercase = config.__dict__ __lowercase = modal_hidden_size if num_labels: __lowercase = num_labels
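# The wrapper above adopts the wrapped config's attribute dict wholesale
# (upstream this reads self.__dict__ = config.__dict__) and then overlays the
# multimodal fields. Note the dict is shared rather than copied, so the
# overlay is visible on the wrapped config too. A minimal stand-alone
# illustration with stand-in objects:
from types import SimpleNamespace

base = SimpleNamespace(hidden_size=768, vocab_size=30522)
wrapper = SimpleNamespace()
wrapper.__dict__ = base.__dict__       # share, not copy
wrapper.modal_hidden_size = 2048
print(base.modal_hidden_size)          # 2048, the overlay leaks back to base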
655
0
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : '''simple docstring''' @staticmethod def _snake_case ( *lowerCamelCase : Any , **lowerCamelCase : Dict ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class _A ( unittest.TestCase ): '''simple docstring''' _snake_case : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING def _snake_case ( self : Any , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] ): '''simple docstring''' __lowercase = ObjectDetectionPipeline(model=lowerCamelCase , image_processor=lowerCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _snake_case ( self : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[str] ): '''simple docstring''' __lowercase = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(lowerCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase , { "score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase ), "box": {"xmin": ANY(lowerCamelCase ), "ymin": ANY(lowerCamelCase ), "xmax": ANY(lowerCamelCase ), "ymax": ANY(lowerCamelCase )}, } , ) import datasets __lowercase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) __lowercase = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] __lowercase = object_detector(lowerCamelCase , threshold=0.0 ) self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(lowerCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase , { "score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase ), "box": {"xmin": ANY(lowerCamelCase ), "ymin": ANY(lowerCamelCase ), "xmax": ANY(lowerCamelCase ), "ymax": ANY(lowerCamelCase )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' pass @require_torch def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = "hf-internal-testing/tiny-detr-mobilenetsv3" __lowercase = AutoModelForObjectDetection.from_pretrained(lowerCamelCase ) __lowercase = AutoFeatureExtractor.from_pretrained(lowerCamelCase ) __lowercase = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) __lowercase = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 
) , [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def _snake_case ( self : int ): '''simple docstring''' __lowercase = "facebook/detr-resnet-50" __lowercase = AutoModelForObjectDetection.from_pretrained(lowerCamelCase ) __lowercase = AutoFeatureExtractor.from_pretrained(lowerCamelCase ) __lowercase = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) __lowercase = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = "facebook/detr-resnet-50" __lowercase = pipeline("object-detection" , model=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) __lowercase = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", 
"http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = 0.9985 __lowercase = "facebook/detr-resnet-50" __lowercase = pipeline("object-detection" , model=lowerCamelCase ) __lowercase = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowerCamelCase ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = "Narsil/layoutlmv3-finetuned-funsd" __lowercase = 0.9993 __lowercase = pipeline("object-detection" , model=lowerCamelCase , threshold=lowerCamelCase ) __lowercase = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=4 ) , [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
704
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : Dict = StableUnCLIPImgaImgPipeline _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : int = frozenset([] ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = 32 __lowercase = embedder_hidden_size # image encoding components __lowercase = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __lowercase = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowercase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __lowercase = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __lowercase = AutoencoderKL() __lowercase = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, 
"text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' if str(lowerCamelCase ).startswith("mps" ): __lowercase = torch.manual_seed(lowerCamelCase ) else: __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __lowercase = input_image * 0.5 + 0.5 __lowercase = input_image.clamp(0 , 1 ) __lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __lowercase = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __lowercase = sd_pipe(**lowerCamelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _snake_case ( self : str ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Any ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , 
lowerCamelCase ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __lowercase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
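The slow integration tests above boil down to the following usage pattern (a sketch; the checkpoint id, image URL and prompt are the ones the tests load, and the CPU offload mirrors their memory-saving setup):

import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

# Image-variation call performed by the integration tests above.
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe.enable_sequential_cpu_offload()  # same memory saving the tests enable
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, "anime turtle").images[0]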
655
0
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
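A quick round-trip check of the three rail-fence routines above:

# encrypt/decrypt are inverses for any valid key; bruteforce returns every candidate.
message = "WE ARE DISCOVERED. FLEE AT ONCE"
ciphertext = encrypt(message, 3)
assert decrypt(ciphertext, 3) == message
assert bruteforce(ciphertext)[3] == message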
705
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _A ( _lowercase , _lowercase ): '''simple docstring''' @register_to_config def __init__( self : Optional[Any] , *, lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__() __lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) ) # parameters for additional clip time embeddings __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase ) __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase ) # parameters for encoder hidden states __lowercase = clip_extra_context_tokens __lowercase = nn.Linear( lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim ) __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase ) __lowercase = nn.LayerNorm(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ): '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings __lowercase = image_embeddings.shape[0] __lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) __lowercase = classifier_free_guidance_embeddings.expand( lowerCamelCase , -1 ) __lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] __lowercase = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... __lowercase = self.embedding_proj(lowerCamelCase ) __lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase ) __lowercase = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" __lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase ) __lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens ) __lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 ) __lowercase = self.encoder_hidden_states_proj(lowerCamelCase ) __lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase ) __lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
655
0
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
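A small worked example for the backtracking solver above (0 = open cell, 1 = wall; the path runs from the top-left to the bottom-right corner):

maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
solve_maze(maze)  # prints the 0/1 solution grid and returns True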
706
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar snake_case__ : Union[str, Any] = TypeVar("""T""") snake_case__ : Optional[int] = TypeVar("""U""") class _A ( Generic[T, U] ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ): '''simple docstring''' __lowercase = key __lowercase = val __lowercase = None __lowercase = None def __repr__( self : Any ): '''simple docstring''' return ( f"""Node: key: {self.key}, val: {self.val}, """ f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}""" ) class _A ( Generic[T, U] ): '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) __lowercase , __lowercase = self.rear, self.head def __repr__( self : Optional[Any] ): '''simple docstring''' __lowercase = ["DoubleLinkedList"] __lowercase = self.head while node.next is not None: rep.append(str(lowerCamelCase ) ) __lowercase = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' __lowercase = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None __lowercase = node __lowercase = previous __lowercase = node __lowercase = self.rear def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' if node.prev is None or node.next is None: return None __lowercase = node.next __lowercase = node.prev __lowercase = None __lowercase = None return node class _A ( Generic[T, U] ): '''simple docstring''' _snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = DoubleLinkedList() __lowercase = capacity __lowercase = 0 __lowercase = 0 __lowercase = 0 __lowercase = {} def __repr__( self : Optional[Any] ): '''simple docstring''' return ( f"""CacheInfo(hits={self.hits}, misses={self.miss}, """ f"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self : Dict , lowerCamelCase : T ): '''simple docstring''' return key in self.cache def _snake_case ( self : List[Any] , lowerCamelCase : T ): '''simple docstring''' if key in self.cache: self.hits += 1 __lowercase = self.cache[key] __lowercase = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowerCamelCase ) return node.val self.miss += 1 return None def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity __lowercase = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowerCamelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value __lowercase = 
self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list __lowercase = value self.list.add(lowerCamelCase ) @classmethod def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ): '''simple docstring''' def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*lowerCamelCase : T ) -> U: if func not in cls.decorator_function_to_instance_map: __lowercase = LRUCache(lowerCamelCase ) __lowercase = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: __lowercase = func(*lowerCamelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
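Intended usage of the cache above as a memoizing decorator, sketched under the assumption that the classes and the classmethod carry the distinct names their own internal references use (DoubleLinkedListNode, DoubleLinkedList, LRUCache and LRUCache.decorator); in the dump above all three classes share the obfuscated name _A:

# Memoized Fibonacci via the class-level decorator (keys on the first positional arg).
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed with memoization
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)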
655
0
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _A ( _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : List[str] = RoCBertTokenizer _snake_case : Optional[Any] = None _snake_case : int = False _snake_case : Optional[int] = True _snake_case : Optional[Any] = filter_non_english def _snake_case ( self : Optional[Any] ): '''simple docstring''' super().setUp() __lowercase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] __lowercase = {} __lowercase = {} for i, value in enumerate(lowerCamelCase ): __lowercase = i __lowercase = i __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(lowerCamelCase , lowerCamelCase , ensure_ascii=lowerCamelCase ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(lowerCamelCase , lowerCamelCase , ensure_ascii=lowerCamelCase ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __lowercase = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(lowerCamelCase , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCamelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase ) , [5, 6, 2, 5, 7, 8] ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _snake_case ( self : int ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _snake_case ( self : int ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _snake_case ( self : int ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __lowercase = {} for i, token in enumerate(lowerCamelCase ): __lowercase = i __lowercase = RoCBertWordpieceTokenizer(vocab=lowerCamelCase , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _snake_case ( self : int ): '''simple docstring''' self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _snake_case ( self : Tuple ): '''simple docstring''' self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _snake_case ( self : Any ): '''simple docstring''' self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: __lowercase = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowercase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase ) __lowercase = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __lowercase = tokenizer_r.encode_plus( lowerCamelCase , return_attention_mask=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase , ) __lowercase = tokenizer_r.do_lower_case if hasattr(lowerCamelCase , "do_lower_case" ) else False __lowercase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = ["的", "人", "有"] __lowercase = "".join(lowerCamelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowercase = True __lowercase = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase ) __lowercase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase ) __lowercase = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) __lowercase = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) __lowercase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase ) __lowercase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCamelCase , lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) __lowercase = False __lowercase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase ) __lowercase = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase ) __lowercase = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) __lowercase = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) __lowercase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase ) __lowercase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase ) # it is expected that only the first Chinese character is not 
preceded by "##". __lowercase = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase ) ] self.assertListEqual(lowerCamelCase , lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) @slow def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __lowercase = tokenizer.encode("你好" , add_special_tokens=lowerCamelCase ) __lowercase = tokenizer.encode("你是谁" , add_special_tokens=lowerCamelCase ) __lowercase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase ) __lowercase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _snake_case ( self : Any ): '''simple docstring''' __lowercase = self.get_tokenizers(do_lower_case=lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowercase = "你好,你是谁" __lowercase = tokenizer.tokenize(lowerCamelCase ) __lowercase = tokenizer.convert_tokens_to_ids(lowerCamelCase ) __lowercase = tokenizer.convert_tokens_to_shape_ids(lowerCamelCase ) __lowercase = tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase ) __lowercase = tokenizer.prepare_for_model( lowerCamelCase , lowerCamelCase , lowerCamelCase , add_special_tokens=lowerCamelCase ) __lowercase = tokenizer.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase )
707
import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) snake_case__ : Optional[Any] = logging.getLogger() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = "\n".join(_SCREAMING_SNAKE_CASE ) Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random""" snake_case__ : int = """sshleifer/bart-tiny-random""" snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart""" snake_case__ : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _A ( _lowercase ): '''simple docstring''' def _snake_case ( self : str , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(lowerCamelCase , lowerCamelCase ) __lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split() with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): run_generate() assert Path(lowerCamelCase ).exists() # os.remove(Path(output_file_name)) def _snake_case ( self : Dict ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } __lowercase = Path(self.get_auto_remove_tmp_dir() ) __lowercase = str(tmp_dir / "scores.json" ) __lowercase = str(tmp_dir / "val.target" ) _dump_articles(lowerCamelCase , text["en"] ) _dump_articles(lowerCamelCase , text["de"] ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {str(lowerCamelCase )} {str(lowerCamelCase )} --score_path {score_path} --reference_path {reference_path} --task {task} """.split() testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] ) with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): with CaptureStdout() as cs: run_search() __lowercase = [" num_beams | length_penalty", model, "Best score args"] __lowercase = ["Info"] if "translation" in task: expected_strings.append("bleu" ) else: 
expected_strings.extend(lowerCamelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowerCamelCase ).exists() os.remove(Path(lowerCamelCase ) )
655
0
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker snake_case__ : int = """CompVis/stable-diffusion-v1-1""" snake_case__ : Dict = """CompVis/stable-diffusion-v1-2""" snake_case__ : Dict = """CompVis/stable-diffusion-v1-3""" snake_case__ : Optional[Any] = """CompVis/stable-diffusion-v1-4""" class _A ( _lowercase ): '''simple docstring''' def __init__( self : Tuple , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , lowerCamelCase : bool = True , ): '''simple docstring''' super()._init_() __lowercase = StableDiffusionPipeline.from_pretrained(lowerCamelCase ) __lowercase = StableDiffusionPipeline.from_pretrained(lowerCamelCase ) __lowercase = StableDiffusionPipeline.from_pretrained(lowerCamelCase ) __lowercase = StableDiffusionPipeline( vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , requires_safety_checker=lowerCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return {k: getattr(self , lowerCamelCase ) for k in self.config.keys() if not k.startswith("_" )} def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCamelCase ) def _snake_case ( self : Tuple ): '''simple docstring''' self.enable_attention_slicing(lowerCamelCase ) @torch.no_grad() def _snake_case ( self : Union[str, Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Tuple , ): '''simple docstring''' return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : int , 
lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Any , ): '''simple docstring''' return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : List[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Union[str, Any] , ): '''simple docstring''' return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : Any , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : Optional[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : 
Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' __lowercase = "cuda" if torch.cuda.is_available() else "cpu" self.to(lowerCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 __lowercase = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 __lowercase = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 __lowercase = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 __lowercase = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
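Upstream this file is a diffusers community pipeline, which is normally loaded through the custom_pipeline argument; a sketch, where the "stable_diffusion_comparison" id is an assumption based on the upstream filename:

from diffusers import DiffusionPipeline

# Load the comparison pipeline on top of one of the checkpoints it wraps;
# it internally pulls in all four SD v1.x checkpoints listed above.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
)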
708
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_to_move = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_to_move, excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
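A worked example for the coin distribution above:

# Root holds 3 coins, both leaves hold 0: one coin moves to each leaf, so 2 moves total.
root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2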
655
0
from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers snake_case__ : List[Any] = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): require_version(deps[pkg] , _SCREAMING_SNAKE_CASE )
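The underlying check each entry goes through (a sketch; the version range shown is illustrative, the real pin comes from dependency_versions_table):

from transformers.utils.versions import require_version

# Raises with an actionable message if the installed tqdm falls outside the range;
# the second argument is an optional hint appended to the error text.
require_version("tqdm>=4.27", "Try: pip install -U tqdm")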
709
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = SwinvaConfig() __lowercase = swinva_name.split("_" ) __lowercase = name_split[1] if "to" in name_split[3]: __lowercase = int(name_split[3][-3:] ) else: __lowercase = int(name_split[3] ) if "to" in name_split[2]: __lowercase = int(name_split[2][-2:] ) else: __lowercase = int(name_split[2][6:] ) if model_size == "tiny": __lowercase = 9_6 __lowercase = (2, 2, 6, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "small": __lowercase = 9_6 __lowercase = (2, 2, 1_8, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "base": __lowercase = 1_2_8 __lowercase = (2, 2, 1_8, 2) __lowercase = (4, 8, 1_6, 3_2) else: __lowercase = 1_9_2 __lowercase = (2, 2, 1_8, 2) __lowercase = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: __lowercase = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __lowercase = 2_1_8_4_1 __lowercase = "huggingface/label-files" __lowercase = "imagenet-22k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} else: __lowercase = 1_0_0_0 __lowercase = "huggingface/label-files" __lowercase = "imagenet-1k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} __lowercase = img_size __lowercase = num_classes __lowercase = embed_dim __lowercase = depths __lowercase = num_heads __lowercase = window_size return config def snake_case_ ( _SCREAMING_SNAKE_CASE ): if "patch_embed.proj" in name: __lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowercase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowercase = "encoder." + name if "attn.proj" in name: __lowercase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __lowercase = name.replace("attn" , "attention.self" ) if "norm1" in name: __lowercase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __lowercase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __lowercase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __lowercase = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: __lowercase = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: __lowercase = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: __lowercase = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: __lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": __lowercase = "layernorm.weight" if name == "norm.bias": __lowercase = "layernorm.bias" if "head" in name: __lowercase = name.replace("head" , "classifier" ) else: __lowercase = "swinv2." 
+ name return name def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for key in orig_state_dict.copy().keys(): __lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: __lowercase = key.split("." ) __lowercase = int(key_split[1] ) __lowercase = int(key_split[3] ) __lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowercase = val[:dim, :] __lowercase = val[dim : dim * 2, :] __lowercase = val[-dim:, :] else: __lowercase = val[:dim] __lowercase = val[ dim : dim * 2 ] __lowercase = val[-dim:] else: __lowercase = val return orig_state_dict def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() __lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE ) __lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() __lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) __lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) __lowercase = timm_model(inputs["pixel_values"] ) __lowercase = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub( repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case__ : str = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
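Programmatic equivalent of the argparse entry point above, using the converter function the file defines (the output directory is a placeholder):

# Converts the timm checkpoint, verifies logits against timm, saves locally
# and pushes the result to the Hub, as the function body above does.
convert_swinva_checkpoint("swinv2_tiny_patch4_window8_256", "./swinv2-tiny-patch4-window8-256")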
655
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case__ : int = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[str] = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Tuple = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[Any] = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[int] = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
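Because of the lazy structure above, consumers import the Blenderbot classes as usual; the _LazyModule resolves each name only on first attribute access, so importing the package itself stays cheap:

# The heavy torch/tf/flax modules load only when one of these names is resolved.
from transformers.models.blenderbot import BlenderbotConfig, BlenderbotTokenizer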
710
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : List[str] = logging.get_logger(__name__) snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } snake_case__ : List[str] = { """allenai/led-base-16384""": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def snake_case_ ( ): __lowercase = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowercase = bs[:] __lowercase = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 __lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase = char return pairs class _A ( _lowercase ): '''simple docstring''' _snake_case : List[str] = VOCAB_FILES_NAMES _snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle: __lowercase = json.load(lowerCamelCase ) __lowercase = {v: k for k, v in self.encoder.items()} __lowercase = errors # how to handle errors in decoding __lowercase = bytes_to_unicode() __lowercase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase , encoding="utf-8" ) as merges_handle: __lowercase = merges_handle.read().split("\n" )[1:-1] __lowercase = [tuple(merge.split() ) for merge in bpe_merges] __lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) __lowercase = {} __lowercase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _snake_case ( self : Optional[int] ): '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[int] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , lowerCamelCase : str ): '''simple docstring''' if token in self.cache: return self.cache[token] __lowercase = tuple(lowerCamelCase ) __lowercase = get_pairs(lowerCamelCase ) if not pairs: return token while True: __lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowercase , __lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(lowerCamelCase ): try: __lowercase = word.index(lowerCamelCase , lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase = j if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(lowerCamelCase ) __lowercase = new_word if len(lowerCamelCase ) == 1: break else: __lowercase = get_pairs(lowerCamelCase ) __lowercase = " ".join(lowerCamelCase ) __lowercase = word return word def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ): '''simple docstring''' __lowercase = [] for token in re.findall(self.pat , lowerCamelCase ): __lowercase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : str , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.decoder.get(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = "".join(lowerCamelCase ) __lowercase = bytearray([self.byte_decoder[c] for c in text] 
).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" ) __lowercase = 0 with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowercase = token_index writer.write(" ".join(lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ): '''simple docstring''' __lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()): __lowercase = " " + text return (text, kwargs) def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' __lowercase = super()._pad( encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: __lowercase = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __lowercase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` 
need to have the same length as other (sequential) inputs. __lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase ) if needs_to_be_padded: __lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __lowercase = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __lowercase = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
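# A stand-alone sketch of the byte-level BPE mapping used by the tokenizer
# above (names here are hypothetical reimplementations, not the source's):
# every possible byte is assigned a printable unicode stand-in, so any UTF-8
# string survives a round trip through the encoder/decoder tables.
def bytes_to_unicode_map():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs, n = bs[:], 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

_byte_encoder = bytes_to_unicode_map()
_byte_decoder = {v: k for k, v in _byte_encoder.items()}
_encoded = "".join(_byte_encoder[b] for b in "héllo".encode("utf-8"))
assert bytearray(_byte_decoder[c] for c in _encoded).decode("utf-8") == "héllo"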
655
0
from collections.abc import Generator from math import sin def snake_case_ ( _SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) != 3_2: raise ValueError("Input must be of length 32" ) __lowercase = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def snake_case_ ( _SCREAMING_SNAKE_CASE ): if i < 0: raise ValueError("Input must be non-negative" ) __lowercase = format(_SCREAMING_SNAKE_CASE , "08x" )[-8:] __lowercase = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = b"" for char in message: bit_string += format(_SCREAMING_SNAKE_CASE , "08b" ).encode("utf-8" ) __lowercase = format(len(_SCREAMING_SNAKE_CASE ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_SCREAMING_SNAKE_CASE ) % 5_1_2 != 4_4_8: bit_string += b"0" bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] ) return bit_string def snake_case_ ( _SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) % 5_1_2 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(_SCREAMING_SNAKE_CASE ) , 5_1_2 ): __lowercase = bit_string[pos : pos + 5_1_2] __lowercase = [] for i in range(0 , 5_1_2 , 3_2 ): block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) ) yield block_words def snake_case_ ( _SCREAMING_SNAKE_CASE ): if i < 0: raise ValueError("Input must be non-negative" ) __lowercase = format(_SCREAMING_SNAKE_CASE , "032b" ) __lowercase = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(_SCREAMING_SNAKE_CASE , 2 ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return (a + b) % 2**3_2 def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2 def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = preprocess(_SCREAMING_SNAKE_CASE ) __lowercase = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )] # Starting states __lowercase = 0x6_7_4_5_2_3_0_1 __lowercase = 0xe_f_c_d_a_b_8_9 __lowercase = 0x9_8_b_a_d_c_f_e __lowercase = 0x1_0_3_2_5_4_7_6 __lowercase = [ 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_SCREAMING_SNAKE_CASE ): __lowercase = aa __lowercase = ba __lowercase = ca __lowercase = da # Hash current chunk for i in range(6_4 ): if i <= 1_5: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowercase = d ^ (b & (c ^ d)) __lowercase = i elif i <= 3_1: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowercase = c ^ (d & (b ^ c)) __lowercase = (5 * i + 1) % 1_6 elif i <= 4_7: __lowercase = b ^ c ^ d __lowercase = (3 * i + 5) % 1_6 else: __lowercase = c ^ (b | not_aa(_SCREAMING_SNAKE_CASE )) __lowercase = (7 * i) % 1_6 __lowercase = (f + a + added_consts[i] + block_words[g]) % 2**3_2 __lowercase = d __lowercase = c __lowercase = b __lowercase = sum_aa(_SCREAMING_SNAKE_CASE , left_rotate_aa(_SCREAMING_SNAKE_CASE , shift_amounts[i] ) ) # Add hashed chunk to running 
total __lowercase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = sum_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowercase = reformat_hex(_SCREAMING_SNAKE_CASE ) + reformat_hex(_SCREAMING_SNAKE_CASE ) + reformat_hex(_SCREAMING_SNAKE_CASE ) + reformat_hex(_SCREAMING_SNAKE_CASE ) return digest if __name__ == "__main__": import doctest doctest.testmod()
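# Cross-check sketch for the MD5 implementation above (a minimal sketch, not
# the source's API): a from-scratch digest should match Python's hashlib on
# the RFC 1321 test vectors.
import hashlib

def reference_md5(message: bytes) -> str:
    # hashlib is the trusted reference implementation
    return hashlib.md5(message).hexdigest()

assert reference_md5(b"") == "d41d8cd98f00b204e9800998ecf8427e"
assert reference_md5(b"abc") == "900150983cd24fb0d6963f7d28e17f72"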
711
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError("The length of profit and weight must be same." ) if max_weight <= 0: raise ValueError("max_weight must greater than zero." ) if any(p < 0 for p in profit ): raise ValueError("Profit can not be negative." ) if any(w < 0 for w in weight ): raise ValueError("Weight can not be negative." ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. __lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] # Creating a copy of the list and sorting profit/weight in ascending order __lowercase = sorted(_SCREAMING_SNAKE_CASE ) # declaring useful variables __lowercase = len(_SCREAMING_SNAKE_CASE ) __lowercase = 0 __lowercase = 0 __lowercase = 0 # loop till the total weight do not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight __lowercase = sorted_profit_by_weight[length - i - 1] __lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE ) __lowercase = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( """Input profits, weights, and then max_weight (all positive ints) separated by """ """spaces.""" ) snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()] snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()] snake_case__ : Optional[Any] = int(input("""Max weight allowed: """)) # Function Call calc_profit(profit, weight, max_weight)
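# Worked example of the same fractional-knapsack greedy (stand-alone sketch;
# names are hypothetical): with profits [10, 9, 8], weights [3, 4, 5] and
# max_weight 5, the greedy takes all of item 0 and half of item 1, so the
# gain is 10 + 0.5 * 9 = 14.5 -- the same result the function above returns.
def fractional_knapsack(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain, remaining = 0.0, max_weight
    for p, w in items:
        take = min(w, remaining)  # take the whole item, or the fraction that fits
        gain += p * take / w
        remaining -= take
        if remaining == 0:
            break
    return gain

assert fractional_knapsack([10, 9, 8], [3, 4, 5], 5) == 14.5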
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError("Input must be an integer" ) if input_num <= 0: raise ValueError("Input must be positive" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
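# Quick check for the proper-divisor sum above (stand-alone sketch with a
# hypothetical name): perfect numbers such as 6 and 28 equal the sum of
# their proper divisors, so the function should return the input itself.
def proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n // 2 + 1) if n % d == 0)

assert proper_divisor_sum(6) == 6    # 1 + 2 + 3
assert proper_divisor_sum(28) == 28  # 1 + 2 + 4 + 7 + 14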
712
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = """openai/whisper-base""" _snake_case : Union[str, Any] = ( """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """ """transcribed text.""" ) _snake_case : Any = """transcriber""" _snake_case : Any = WhisperProcessor _snake_case : Optional[int] = WhisperForConditionalGeneration _snake_case : str = ["""audio"""] _snake_case : Optional[int] = ["""text"""] def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features def _snake_case ( self : str , lowerCamelCase : List[Any] ): '''simple docstring''' return self.model.generate(inputs=lowerCamelCase ) def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
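# Usage sketch for the transcription tool above (hedged: in the released
# transformers agents API the equivalent class is SpeechToTextTool; the names
# below are assumptions, and `audio` stands for a raw waveform array):
# tool = SpeechToTextTool()
# text = tool(audio)  # runs encode -> forward (generate) -> decode as defined above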
655
0
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def snake_case_ ( ): __lowercase = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert("RGB" ) return image def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ): __lowercase = dct.pop(_SCREAMING_SNAKE_CASE ) __lowercase = val def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __lowercase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) __lowercase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict __lowercase = torch.cat((q_bias, torch.zeros_like(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE ), v_bias) ) __lowercase = qkv_bias def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = 3_6_4 if "coco" in model_name else 2_2_4 __lowercase = InstructBlipVisionConfig(image_size=_SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: __lowercase = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __lowercase = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: __lowercase = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=3_2_0_0_1 ).to_dict() elif "vicuna-13b" in model_name: __lowercase = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=3_2_0_0_1 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 __lowercase = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict() __lowercase = InstructBlipConfig(vision_config=_SCREAMING_SNAKE_CASE , text_config=_SCREAMING_SNAKE_CASE , qformer_config=_SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ): __lowercase = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: __lowercase = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) __lowercase = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) __lowercase , __lowercase = get_blipa_config(_SCREAMING_SNAKE_CASE ) __lowercase = InstructBlipForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval() __lowercase = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } __lowercase , __lowercase = model_name_to_original[model_name] # load original model print("Loading original model..." 
) __lowercase = "cuda:1" if torch.cuda.is_available() else "cpu" __lowercase = "cuda:2" if torch.cuda.is_available() else "cpu" __lowercase , __lowercase , __lowercase = load_model_and_preprocess( name=_SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , is_eval=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) original_model.eval() print("Done!" ) # update state dict keys __lowercase = original_model.state_dict() __lowercase = create_rename_keys(_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __lowercase = state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith("Qformer.bert" ): __lowercase = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: __lowercase = key.replace("self" , "attention" ) if "llm_proj" in key: __lowercase = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: __lowercase = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): __lowercase = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): __lowercase = key.replace("t5" , "language" ) __lowercase = val # read in qv biases read_in_q_v_bias(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) __lowercase = load_demo_image() __lowercase = "What is unusual about this image?" # create processor __lowercase = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE ) __lowercase = InstructBlipProcessor( image_processor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE , ) __lowercase = processor(images=_SCREAMING_SNAKE_CASE , text=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values __lowercase = vis_processors["eval"](_SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(_SCREAMING_SNAKE_CASE ) __lowercase = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _SCREAMING_SNAKE_CASE ) original_model.to(_SCREAMING_SNAKE_CASE ) hf_model.to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "vicuna" in model_name: __lowercase = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits __lowercase = hf_model(**_SCREAMING_SNAKE_CASE ).logits else: __lowercase = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits __lowercase = tokenizer("\n" , return_tensors="pt" ).input_ids.to(_SCREAMING_SNAKE_CASE ) __lowercase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 ) __lowercase = hf_model(**_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape __lowercase = 1E-4 if "vicuna" in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) print("Looks ok!" ) print("Generating with original model..." 
) __lowercase = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) __lowercase = hf_model.generate( **_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? __lowercase = 2 print("Original generation:" , _SCREAMING_SNAKE_CASE ) __lowercase = processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) __lowercase = [text.strip() for text in output_text] print("HF generation:" , _SCREAMING_SNAKE_CASE ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(F"""Salesforce/{model_name}""" ) hf_model.push_to_hub(F"""Salesforce/{model_name}""" ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() snake_case__ : Tuple = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) snake_case__ : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
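# Hypothetical shell invocation of the conversion script above (the script
# filename and output path are assumptions, not from the source):
# python convert_instructblip_original_to_pytorch.py \
#     --model_name instructblip-flan-t5-xl \
#     --pytorch_dump_folder_path ./instructblip-flan-t5-xl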
713
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _A : '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) __lowercase = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) __lowercase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, 
"image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _snake_case ( self : str ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["prompt"] __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] if "image" in inputs: __lowercase = inputs["image"] else: __lowercase = None if "mask_image" in inputs: __lowercase = inputs["mask_image"] else: __lowercase = None if "original_image" in inputs: __lowercase = inputs["original_image"] else: __lowercase = None __lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase ) # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = inputs["generator"] __lowercase = inputs["num_inference_steps"] __lowercase = inputs["output_type"] # inputs with prompt converted to embeddings __lowercase = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: __lowercase = image if mask_image is not None: __lowercase = mask_image if original_image is not None: __lowercase = original_image __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe(**lowerCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase ) __lowercase = self.pipeline_class.from_pretrained(lowerCamelCase ) pipe_loaded.to(lowerCamelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests __lowercase = self.get_dummy_inputs(lowerCamelCase ) __lowercase = pipe_loaded(**lowerCamelCase )[0] __lowercase = np.abs(to_np(lowerCamelCase ) - 
to_np(lowerCamelCase ) ).max() self.assertLess(lowerCamelCase , 1e-4 )
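# Both tests above follow the same save/load reproducibility pattern; in
# outline (pipeline and variable names here are illustrative only):
# out_a = pipe(**inputs)[0]
# pipe.save_pretrained(tmpdir)
# pipe_loaded = PipelineClass.from_pretrained(tmpdir)
# out_b = pipe_loaded(**inputs)[0]
# assert np.abs(to_np(out_a) - to_np(out_b)).max() < 1e-4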
655
0
import numpy as np def snake_case_ ( _SCREAMING_SNAKE_CASE ): return 1 / (1 + np.exp(-vector )) def snake_case_ ( _SCREAMING_SNAKE_CASE ): return vector * sigmoid(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
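# Numeric spot-check of the sigmoid-weighted linear unit above (stand-alone
# sketch; the name `swish` is an assumption): sigmoid(0) = 0.5, so the
# weighted value at 0 is 0, and at 1 it is 1 / (1 + e**-1) ~= 0.7311.
import numpy as np

def swish(x):
    return x / (1 + np.exp(-x))

assert swish(np.array(0.0)) == 0.0
assert np.isclose(swish(np.array(1.0)), 0.7310585786300049)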
714
import numpy as np snake_case__ : Tuple = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class _A : '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' __lowercase = np.array(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase , __lowercase = np.where(letter == self.SQUARE ) __lowercase = np.concatenate([indexa + 1, indexa + 1] ) return indexes def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' __lowercase = self.SQUARE[indexa - 1, indexa - 1] return letter def _snake_case ( self : int , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() __lowercase = message.replace(" " , "" ) __lowercase = message.replace("j" , "i" ) __lowercase = np.empty((2, len(lowerCamelCase )) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape(2 * len(lowerCamelCase ) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[numbers_index * 2] ) __lowercase = int(second_step[(numbers_index * 2) + 1] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = encoded_message + letter return encoded_message def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() message.replace(" " , "" ) __lowercase = np.empty(2 * len(lowerCamelCase ) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape((2, len(lowerCamelCase )) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[0, numbers_index] ) __lowercase = int(second_step[1, numbers_index] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = decoded_message + letter return decoded_message
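# A compact stand-alone sketch of the same Bifid idea the class above
# implements (hypothetical names, not the source's API): each letter maps to
# (row, col) in the 5x5 square, all rows are written out followed by all
# columns, and the flattened digits are re-read pairwise into letters.
_SQUARE = "abcdefghiklmnopqrstuvwxyz"  # no "j"; it is folded into "i"

def bifid_encode(message: str) -> str:
    coords = [divmod(_SQUARE.index(c), 5) for c in message.replace("j", "i")]
    flat = [r for r, _ in coords] + [c for _, c in coords]
    return "".join(_SQUARE[flat[2 * i] * 5 + flat[2 * i + 1]] for i in range(len(coords)))

assert bifid_encode("test") == "qtuo"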
655
0
def snake_case_ ( _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError("Input series is not valid, valid series - [2, 4, 6]" ) if len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError("Input list must be a non empty list" ) if len(_SCREAMING_SNAKE_CASE ) == 1: return True __lowercase = series[1] - series[0] for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def snake_case_ ( _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError("Input series is not valid, valid series - [2, 4, 6]" ) if len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError("Input list must be a non empty list" ) __lowercase = 0 for val in series: answer += val return answer / len(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
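# Worked check for the two helpers above (stand-alone sketch; names are
# hypothetical): [2, 4, 6] has a constant difference of 2, so it is
# arithmetic, and its mean is (2 + 4 + 6) / 3 = 4.0.
def is_arithmetic(series):
    diff = series[1] - series[0]
    return all(b - a == diff for a, b in zip(series, series[1:]))

assert is_arithmetic([2, 4, 6])
assert not is_arithmetic([1, 2, 4])
assert sum([2, 4, 6]) / len([2, 4, 6]) == 4.0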
715
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _A ( ctypes.Structure ): '''simple docstring''' _snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def snake_case_ ( ): if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) __lowercase = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("\033[?25l" ) sys.stdout.flush() def snake_case_ ( ): if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) __lowercase = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) ) elif os.name == "posix": sys.stdout.write("\033[?25h" ) sys.stdout.flush() @contextmanager def snake_case_ ( ): try: hide_cursor() yield finally: show_cursor()
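# Usage sketch (the public names are obfuscated above, so roles are shown
# instead): the @contextmanager at the end is meant to wrap long-running
# console output so the terminal cursor is hidden for the duration and
# restored even if an exception is raised:
# with hide_cursor_while_running():  # hypothetical name for the context manager
#     render_progress_bar()          # hypothetical long-running console work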
655
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : Optional[Any] = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[int] = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
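# Illustrative note on the lazy import structure above: with _LazyModule the
# package import itself stays cheap, and the torch-backed classes are only
# materialized on first attribute access, e.g. (hedged example):
# from transformers import InstructBlipProcessor  # resolved lazily at access time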
716
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : List[str] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[Any] = """yolos""" def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = qkv_bias __lowercase = num_detection_tokens __lowercase = use_mid_position_embeddings __lowercase = auxiliary_loss # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = eos_coefficient class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = version.parse("""1.11""" ) @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : str ): '''simple docstring''' return 1e-4 @property def _snake_case ( self : Tuple ): '''simple docstring''' return 12
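# Minimal instantiation sketch (this mirrors transformers' YolosConfig; the
# class name used below is an assumption, since names above are obfuscated):
# config = YolosConfig(num_detection_tokens=100)
# config.image_size            # -> [512, 864], the default set above
# config.num_detection_tokens  # -> 100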
655
0
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version snake_case__ : List[str] = logging.getLogger(__name__) require_version("""pytorch_lightning>=1.0.4""") snake_case__ : Optional[Any] = { """base""": AutoModel, """sequence-classification""": AutoModelForSequenceClassification, """question-answering""": AutoModelForQuestionAnswering, """pretraining""": AutoModelForPreTraining, """token-classification""": AutoModelForTokenClassification, """language-modeling""": AutoModelWithLMHead, """summarization""": AutoModelForSeqaSeqLM, """translation""": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization snake_case__ : Optional[int] = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } snake_case__ : List[Any] = sorted(arg_to_scheduler.keys()) snake_case__ : str = """{""" + """, """.join(arg_to_scheduler_choices) + """}""" class _A ( pl.LightningModule ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : argparse.Namespace , lowerCamelCase : List[Any]=None , lowerCamelCase : str="base" , lowerCamelCase : int=None , lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[Any]=None , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowerCamelCase ) __lowercase = 0 __lowercase = Path(self.hparams.output_dir ) __lowercase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __lowercase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=lowerCamelCase , **lowerCamelCase , ) else: __lowercase = config __lowercase = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams , lowerCamelCase , lowerCamelCase ): assert hasattr(self.config , lowerCamelCase ), f"""model config doesn't have a `{p}` attribute""" setattr(self.config , lowerCamelCase , getattr(self.hparams , lowerCamelCase ) ) if tokenizer is None: __lowercase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowerCamelCase , ) else: __lowercase = tokenizer __lowercase = MODEL_MODES[mode] if model is None: __lowercase = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowerCamelCase , ) else: __lowercase = model def _snake_case ( self : List[str] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[Any] ): '''simple docstring''' __lowercase = self.model_type.from_pretrained(*lowerCamelCase , **lowerCamelCase ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = arg_to_scheduler[self.hparams.lr_scheduler] __lowercase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __lowercase = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = self.model __lowercase = ["bias", "LayerNorm.weight"] __lowercase = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] if self.hparams.adafactor: __lowercase = Adafactor( lowerCamelCase , lr=self.hparams.learning_rate , scale_parameter=lowerCamelCase , relative_step=lowerCamelCase ) else: __lowercase = AdamW( lowerCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __lowercase = optimizer __lowercase = self.get_lr_scheduler() return [optimizer], [scheduler] def _snake_case ( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any ): '''simple docstring''' return self.validation_step(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : Tuple , lowerCamelCase : Any ): '''simple docstring''' return self.validation_end(lowerCamelCase ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __lowercase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any] ): '''simple docstring''' if stage == "test": __lowercase = len(self.test_dataloader().dataset ) else: __lowercase = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=lowerCamelCase ) __lowercase = len(self.train_dataloader().dataset ) def _snake_case ( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : bool = False ): '''simple docstring''' raise NotImplementedError("You must implement this for your task" ) def _snake_case ( self : List[Any] ): '''simple docstring''' return self.train_loader def _snake_case ( self : List[Any] ): '''simple docstring''' return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=lowerCamelCase ) def _snake_case ( self : List[str] ): '''simple docstring''' return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=lowerCamelCase ) def _snake_case ( self : Optional[Any] , lowerCamelCase : Any ): '''simple docstring''' return os.path.join( self.hparams.data_dir , "cached_{}_{}_{}".format( lowerCamelCase , list(filter(lowerCamelCase , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def _snake_case ( self : Optional[int] , lowerCamelCase : Dict[str, Any] ): '''simple docstring''' __lowercase = self.output_dir.joinpath("best_tfmr" ) __lowercase 
= self.step_count self.model.save_pretrained(lowerCamelCase ) self.tokenizer.save_pretrained(lowerCamelCase ) @staticmethod def _snake_case ( lowerCamelCase : Any , lowerCamelCase : List[str] ): '''simple docstring''' parser.add_argument( "--model_name_or_path" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--config_name" , default="" , type=lowerCamelCase , help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name" , default=lowerCamelCase , type=lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument( "--cache_dir" , default=str(Path(lowerCamelCase ).parent / "test_run" / "cache" ) , type=lowerCamelCase , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , ) parser.add_argument( "--encoder_layerdrop" , type=lowerCamelCase , help="Encoder layer dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--decoder_layerdrop" , type=lowerCamelCase , help="Decoder layer dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--dropout" , type=lowerCamelCase , help="Dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--attention_dropout" , type=lowerCamelCase , help="Attention dropout probability (Optional). Goes into model.config" , ) parser.add_argument("--learning_rate" , default=5e-5 , type=lowerCamelCase , help="The initial learning rate for Adam." ) parser.add_argument( "--lr_scheduler" , default="linear" , choices=lowerCamelCase , metavar=lowerCamelCase , type=lowerCamelCase , help="Learning rate scheduler" , ) parser.add_argument("--weight_decay" , default=0.0 , type=lowerCamelCase , help="Weight decay if we apply some." ) parser.add_argument("--adam_epsilon" , default=1e-8 , type=lowerCamelCase , help="Epsilon for Adam optimizer." ) parser.add_argument("--warmup_steps" , default=0 , type=lowerCamelCase , help="Linear warmup over warmup_steps." ) parser.add_argument("--num_workers" , default=4 , type=lowerCamelCase , help="kwarg passed to DataLoader" ) parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=lowerCamelCase ) parser.add_argument("--train_batch_size" , default=32 , type=lowerCamelCase ) parser.add_argument("--eval_batch_size" , default=32 , type=lowerCamelCase ) parser.add_argument("--adafactor" , action="store_true" ) class _A ( pl.Callback ): '''simple docstring''' def _snake_case ( self : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _A ( pl.Callback ): '''simple docstring''' def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowerCamelCase ) class _A ( pl.Callback ): '''simple docstring''' def _snake_case ( self : str , lowerCamelCase : Tuple , lowerCamelCase : List[str] ): '''simple docstring''' __lowercase = trainer.lr_schedulers[0]["scheduler"] __lowercase = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowerCamelCase ) def _snake_case ( self : List[str] , lowerCamelCase : pl.Trainer , lowerCamelCase : pl.LightningModule ): '''simple docstring''' rank_zero_info("***** Validation results *****" ) __lowercase = trainer.callback_metrics # Log results for key in sorted(lowerCamelCase ): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(lowerCamelCase , str(metrics[key] ) ) ) def _snake_case ( self : List[str] , lowerCamelCase : pl.Trainer , lowerCamelCase : pl.LightningModule ): '''simple docstring''' rank_zero_info("***** Test results *****" ) __lowercase = trainer.callback_metrics # Log and save results to file __lowercase = os.path.join(pl_module.hparams.output_dir , "test_results.txt" ) with open(lowerCamelCase , "w" ) as writer: for key in sorted(lowerCamelCase ): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(lowerCamelCase , str(metrics[key] ) ) ) writer.write("{} = {}\n".format(lowerCamelCase , str(metrics[key] ) ) ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir" , default=str(Path(_SCREAMING_SNAKE_CASE ).parent / "test_run" / "model_checkpoints" ) , type=_SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , ) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_SCREAMING_SNAKE_CASE , default="O2" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=_SCREAMING_SNAKE_CASE , help="Max gradient norm" ) parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." ) parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." ) parser.add_argument( "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=_SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , ) parser.add_argument("--seed" , type=_SCREAMING_SNAKE_CASE , default=4_2 , help="random seed for initialization" ) parser.add_argument( "--data_dir" , default=str(Path(_SCREAMING_SNAKE_CASE ).parent / "test_run" / "dummy-train-data" ) , type=_SCREAMING_SNAKE_CASE , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." 
, ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[] , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): pl.seed_everything(args.seed ) # init model __lowercase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) # add custom checkpoints if checkpoint_callback is None: __lowercase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_SCREAMING_SNAKE_CASE ) if logging_callback is None: __lowercase = LoggingCallback() __lowercase = {} if args.fpaa: __lowercase = 1_6 if args.gpus > 1: __lowercase = "auto" __lowercase = "ddp" __lowercase = args.accumulate_grad_batches __lowercase = None __lowercase = "auto" __lowercase = pl.Trainer.from_argparse_args( _SCREAMING_SNAKE_CASE , weights_summary=_SCREAMING_SNAKE_CASE , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_SCREAMING_SNAKE_CASE , val_check_interval=1 , num_sanity_val_steps=2 , **_SCREAMING_SNAKE_CASE , ) if args.do_train: trainer.fit(_SCREAMING_SNAKE_CASE ) else: print("RAG modeling tests with new set functions successfuly executed!" ) return trainer
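# Worked example of the total-steps formula above (all numbers hypothetical):
# with dataset_size = 1000, train_batch_size = 32, accumulate_grad_batches = 2,
# gpus = 1 and max_epochs = 3, the effective batch size is 32 * 2 * 1 = 64 and
# total_steps = (1000 / 64) * 3 = 46.875, i.e. roughly 47 optimizer updates.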
717
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Optional[int] = logging.get_logger(__name__) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError("Quantized models are not supported." ) __lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE ) if matches: __lowercase = float(matches[1] ) __lowercase = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __lowercase = 1_0_0_1 __lowercase = "imagenet-1k-id2label.json" __lowercase = "huggingface/label-files" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()} __lowercase = "background" __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} return config def snake_case_ ( ): __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE ) # Load 🤗 model __lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __lowercase = MobileNetVaImageProcessor( crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , ) __lowercase = image_processor(images=prepare_img() , return_tensors="pt" ) __lowercase = model(**_SCREAMING_SNAKE_CASE ) __lowercase = outputs.logits assert logits.shape == (1, 1_0_0_1) if model_name == "mobilenet_v1_1.0_224": __lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": __lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: __lowercase = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("Pushing to the hub..." ) __lowercase = "google/" + model_name image_processor.push_to_hub(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""mobilenet_v1_1.0_224""", type=str, help="""Name of the MobileNetV1 model you'd like to convert. 
Should be in the form 'mobilenet_v1_<depth>_<size>'.""", ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ : Dict = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
655
0
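The record above wires a pl.callbacks.ModelCheckpoint into a pl.Trainer built via from_argparse_args. A minimal sketch of the same wiring, assuming the current pytorch_lightning API (dirpath/save_top_k; the filepath/prefix arguments used above come from much older releases) and a hypothetical LitModel:

import pytorch_lightning as pl

# Keep only the best checkpoint by validation loss, as in the helper above.
checkpoint_cb = pl.callbacks.ModelCheckpoint(
    dirpath="outputs",    # where checkpoints are written
    monitor="val_loss",   # metric used to rank checkpoints
    mode="min",           # lower val_loss is better
    save_top_k=1,         # keep a single best checkpoint
)
trainer = pl.Trainer(max_epochs=1, callbacks=[checkpoint_cb])
# trainer.fit(LitModel())  # LitModel is a hypothetical LightningModule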
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline snake_case__ : int = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False) parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""") parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""") snake_case__ : str = parser.parse_args() snake_case__ : int = """cpu""" snake_case__ : Optional[int] = """a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many bright buildings""" snake_case__ : Union[str, Any] = """path-to-your-trained-model""" snake_case__ : str = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: snake_case__ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) snake_case__ : Union[str, Any] = pipe.to(device) # to channels last snake_case__ : Any = pipe.unet.to(memory_format=torch.channels_last) snake_case__ : Any = pipe.vae.to(memory_format=torch.channels_last) snake_case__ : int = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: snake_case__ : List[str] = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex snake_case__ : List[Any] = torch.randn(2, 4, 64, 64) snake_case__ : Optional[int] = torch.rand(1) * 9_99 snake_case__ : str = torch.randn(2, 77, 7_68) snake_case__ : List[Any] = (sample, timestep, encoder_hidden_status) try: snake_case__ : List[str] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: snake_case__ : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) snake_case__ : Dict = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) snake_case__ : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: snake_case__ : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute snake_case__ : Dict = 6_66 snake_case__ : List[str] = torch.Generator(device).manual_seed(seed) snake_case__ : int = {"""generator""": generator} if args.steps is not None: snake_case__ : Optional[int] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): snake_case__ : Tuple = pipe(prompt, **generate_kwargs).images[0] # save image image.save("""generated.png""")
718
from __future__ import annotations from typing import Any class _A : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def _snake_case ( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCamelCase ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(lowerCamelCase ) component_size[u_node] += component_size[v_node] self.set_component(lowerCamelCase ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase , __lowercase , __lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase ) print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def snake_case_ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
655
0
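The code field of the record above applies the same two-step recipe to every pipeline component: move it to channels-last memory format, then hand it to ipex.optimize with a bfloat16 dtype. A self-contained sketch of that recipe on a throwaway module (the Conv2d stands in for the UNet/VAE, purely for illustration; intel_extension_for_pytorch must be installed):

import torch
import intel_extension_for_pytorch as ipex

model = torch.nn.Conv2d(3, 8, kernel_size=3).eval()
model = model.to(memory_format=torch.channels_last)  # NHWC layout for CPU

sample = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
try:
    # sample_input lets ipex specialize for the shapes it will see ...
    model = ipex.optimize(model, dtype=torch.bfloat16, inplace=True, sample_input=(sample,))
except Exception:
    # ... with a shape-agnostic fallback, mirroring the script above.
    model = ipex.optimize(model, dtype=torch.bfloat16, inplace=True)

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    out = model(sample)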
'''simple docstring''' import numpy as np import datasets snake_case__ : Union[str, Any] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ snake_case__ : List[Any] = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ snake_case__ : Any = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): '''simple docstring''' def _snake_case ( self : Optional[int] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def _snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Tuple ): '''simple docstring''' __lowercase = np.array(lowerCamelCase ) __lowercase = np.array(lowerCamelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction __lowercase = X - np.mean(lowerCamelCase ) __lowercase = np.cov(reference_distribution.T ) try: __lowercase = np.linalg.inv(lowerCamelCase ) except np.linalg.LinAlgError: __lowercase = np.linalg.pinv(lowerCamelCase ) __lowercase = np.dot(lowerCamelCase , lowerCamelCase ) __lowercase = np.dot(lowerCamelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
719
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : List[str] = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
655
0
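The metric in the record above computes the squared Mahalanobis distance D^2 = (x - mu)^T Sigma^+ (x - mu), falling back to the pseudo-inverse when the covariance is singular. A small numpy check that reproduces the docstring's quoted output of 0.5 (note the scalar np.mean, matching the metric's own code):

import numpy as np

reference = np.array([[0.0, 1.0], [1.0, 0.0]])  # reference distribution
X = np.array([[0.0, 1.0]])                      # query point

mu = np.mean(reference)        # scalar mean, as in the metric above
cov = np.cov(reference.T)      # covariance of the reference set (singular here)
cov_inv = np.linalg.pinv(cov)  # pseudo-inverse handles the singular case

delta = X - mu
mahal = np.dot(np.dot(delta, cov_inv), delta.T).diagonal()
print(mahal)  # [0.5]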
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Union[str, Any] = """▁""" snake_case__ : Dict = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""} snake_case__ : Optional[Any] = { """sentencepiece_model_file""": """sentencepiece.bpe.model""", """vocab_file""": """vocab.txt""", } snake_case__ : Any = { """vocab_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", }, """sentencepiece_model_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", }, } snake_case__ : Union[str, Any] = { """ernie-m-base""": 5_14, """ernie-m-large""": 5_14, } snake_case__ : Optional[int] = { """ernie-m-base""": {"""do_lower_case""": False}, """ernie-m-large""": {"""do_lower_case""": False}, } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[str] = ["input_ids"] _snake_case : Union[str, Any] = VOCAB_FILES_NAMES _snake_case : Dict = PRETRAINED_INIT_CONFIGURATION _snake_case : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP _snake_case : List[str] = RESOURCE_FILES_NAMES def __init__( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : List[str]=None , lowerCamelCase : int=False , lowerCamelCase : List[Any]="utf8" , lowerCamelCase : Optional[int]="[UNK]" , lowerCamelCase : List[str]="[SEP]" , lowerCamelCase : int="[PAD]" , lowerCamelCase : List[str]="[CLS]" , lowerCamelCase : Optional[Any]="[MASK]" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : List[str] , ): '''simple docstring''' __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , vocab_file=lowerCamelCase , encoding=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) __lowercase = do_lower_case __lowercase = sentencepiece_model_ckpt __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: __lowercase = self.load_vocab(filepath=lowerCamelCase ) else: __lowercase = {self.sp_model.id_to_piece(lowerCamelCase ): id for id in range(self.sp_model.get_piece_size() )} __lowercase = {v: k for k, v in self.vocab.items()} def _snake_case ( self : str , lowerCamelCase : Optional[int] ): '''simple docstring''' if text is None: return None __lowercase = self.tokenize(lowerCamelCase ) __lowercase , __lowercase = "", [] for i, ch in enumerate(lowerCamelCase ): if ch in self.SP_CHAR_MAPPING: __lowercase = self.SP_CHAR_MAPPING.get(lowerCamelCase ) else: __lowercase = unicodedata.normalize("NFKC" , lowerCamelCase ) if self.is_whitespace(lowerCamelCase ): continue normalized_text += ch char_mapping.extend([i] * len(lowerCamelCase ) ) __lowercase , __lowercase , __lowercase = normalized_text, [], 0 if self.do_lower_case: 
__lowercase = text.lower() for token in split_tokens: if token[:1] == "▁": __lowercase = token[1:] __lowercase = text[offset:].index(lowerCamelCase ) + offset __lowercase = start + len(lowerCamelCase ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) __lowercase = end return token_mapping @property def _snake_case ( self : Any ): '''simple docstring''' return len(self.vocab ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : Optional[int] ): '''simple docstring''' __lowercase = self.__dict__.copy() __lowercase = None return state def __setstate__( self : str , lowerCamelCase : Optional[Any] ): '''simple docstring''' __lowercase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __lowercase = {} __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def _snake_case ( self : int , lowerCamelCase : List[str] ): '''simple docstring''' return "".join((self.SP_CHAR_MAPPING.get(lowerCamelCase , lowerCamelCase ) for c in text) ) def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : int=False , lowerCamelCase : Tuple=64 , lowerCamelCase : Optional[int]=0.1 ): '''simple docstring''' if self.sp_model_kwargs.get("enable_sampling" ) is True: __lowercase = True if self.sp_model_kwargs.get("alpha" ) is not None: __lowercase = self.sp_model_kwargs.get("alpha" ) if self.sp_model_kwargs.get("nbest_size" ) is not None: __lowercase = self.sp_model_kwargs.get("nbest_size" ) if not enable_sampling: __lowercase = self.sp_model.EncodeAsPieces(lowerCamelCase ) else: __lowercase = self.sp_model.SampleEncodeAsPieces(lowerCamelCase , lowerCamelCase , lowerCamelCase ) __lowercase = [] for pi, piece in enumerate(lowerCamelCase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(lowerCamelCase ) and pi != 0: new_pieces.append(lowerCamelCase ) continue else: continue __lowercase = 0 for i, chunk in enumerate(lowerCamelCase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(lowerCamelCase ) or self.is_punct(lowerCamelCase ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(lowerCamelCase ) __lowercase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) __lowercase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) __lowercase = i if len(lowerCamelCase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def _snake_case ( self : Any , lowerCamelCase : Optional[Any] ): '''simple docstring''' __lowercase = "".join(lowerCamelCase ).replace(lowerCamelCase , " " ).strip() return out_string def _snake_case ( self : Union[str, Any] , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = self.convert_ids_to_tokens(lowerCamelCase ) __lowercase = "".join(lowerCamelCase ).replace(lowerCamelCase , " " ).strip() return out_string def _snake_case ( self : Tuple , lowerCamelCase : List[str] ): '''simple docstring''' return self.vocab.get(lowerCamelCase , self.vocab.get(self.unk_token ) ) def _snake_case ( self : List[Any] , lowerCamelCase : List[Any] ): '''simple docstring''' return self.reverse_vocab.get(lowerCamelCase , self.unk_token ) def _snake_case ( self : List[Any] , lowerCamelCase 
: List[str] , lowerCamelCase : List[str]=None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def _snake_case ( self : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : str=None ): '''simple docstring''' if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def _snake_case ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int=None , lowerCamelCase : List[str]=False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1] def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: # [CLS] X [SEP] return (len(lowerCamelCase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(lowerCamelCase ) + 1) + [1] * (len(lowerCamelCase ) + 3) def _snake_case ( self : Any , lowerCamelCase : int ): '''simple docstring''' if "\u4e00" <= char <= "\u9fff": return True return False def _snake_case ( self : List[str] , lowerCamelCase : Any ): '''simple docstring''' if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def _snake_case ( self : List[str] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' if char in ",;:.?!~,;:。?!《》【】": return True return False def _snake_case ( self : Optional[int] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(lowerCamelCase ) == 1: __lowercase = unicodedata.category(lowerCamelCase ) if cat == "Zs": return True return False def _snake_case ( self : Optional[Any] , lowerCamelCase : Dict ): '''simple docstring''' __lowercase = {} with io.open(lowerCamelCase , "r" , encoding="utf-8" ) as f: for index, line in enumerate(lowerCamelCase ): __lowercase = line.rstrip("\n" ) __lowercase = int(lowerCamelCase ) return token_to_idx def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' __lowercase = 0 if os.path.isdir(lowerCamelCase ): __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: __lowercase = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" 
) __lowercase = token_index writer.write(token + "\n" ) index += 1 __lowercase = os.path.join(lowerCamelCase , "sentencepiece.bpe.model" ) with open(lowerCamelCase , "wb" ) as fi: __lowercase = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) return (vocab_file,)
720
from __future__ import annotations import bisect def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): if hi < 0: __lowercase = len(_SCREAMING_SNAKE_CASE ) while lo < hi: __lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __lowercase = mid + 1 else: __lowercase = mid return lo def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ): sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = 0 __lowercase = len(_SCREAMING_SNAKE_CASE ) - 1 while left <= right: __lowercase = left + (right - left) // 2 __lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __lowercase = midpoint - 1 else: __lowercase = midpoint + 1 return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item: return index return None def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if right < left: return None __lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 ) else: return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip() snake_case__ : Any = sorted(int(item) for item in user_input.split(""",""")) snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n""")) snake_case__ : List[Any] = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
655
0
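The context snippet above pairs hand-rolled binary searches with the stdlib bisect module they wrap. A short sanity check of the bisect behavior those functions rely on:

import bisect

data = [0, 5, 7, 10, 15]

# bisect_left returns the first index whose value is >= the item,
# so for an item present in the list it is the item's own index.
assert bisect.bisect_left(data, 10) == 3

# bisect_right points one past the last occurrence instead.
assert bisect.bisect_right(data, 10) == 4

# insort_left inserts while keeping the list sorted.
bisect.insort_left(data, 6)
assert data == [0, 5, 6, 7, 10, 15]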
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def snake_case_ ( _SCREAMING_SNAKE_CASE="" ): __lowercase = tempfile.mkdtemp() return os.path.join(_SCREAMING_SNAKE_CASE , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : str ): '''simple docstring''' __lowercase = torch.rand(12 , dtype=torch.floataa ) - 0.5 __lowercase = AgentAudio(lowerCamelCase ) __lowercase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCamelCase , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowerCamelCase ) ) # Ensure that the file contains the same value as the original tensor __lowercase , __lowercase = sf.read(lowerCamelCase ) self.assertTrue(torch.allclose(lowerCamelCase , torch.tensor(lowerCamelCase ) , atol=1e-4 ) ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = torch.rand(12 , dtype=torch.floataa ) - 0.5 __lowercase = get_new_path(suffix=".wav" ) sf.write(lowerCamelCase , lowerCamelCase , 16_000 ) __lowercase = AgentAudio(lowerCamelCase ) self.assertTrue(torch.allclose(lowerCamelCase , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , lowerCamelCase ) @require_vision @require_torch class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Any ): '''simple docstring''' __lowercase = torch.randint(0 , 256 , (64, 64, 3) ) __lowercase = AgentImage(lowerCamelCase ) __lowercase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCamelCase , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase ) ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" __lowercase = Image.open(lowerCamelCase ) __lowercase = AgentImage(lowerCamelCase ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase ) ) def _snake_case ( self : int ): '''simple docstring''' __lowercase = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" __lowercase = Image.open(lowerCamelCase ) __lowercase = AgentImage(lowerCamelCase ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase ) ) class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = "Hey!" 
__lowercase = AgentText(lowerCamelCase ) self.assertEqual(lowerCamelCase , agent_type.to_string() ) self.assertEqual(lowerCamelCase , agent_type.to_raw() ) self.assertEqual(lowerCamelCase , lowerCamelCase )
721
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ : int = logging.get_logger(__name__) snake_case__ : Optional[int] = { """microsoft/conditional-detr-resnet-50""": ( """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json""" ), } class _A ( _lowercase ): '''simple docstring''' _snake_case : Dict = """conditional_detr""" _snake_case : Union[str, Any] = ["""past_key_values"""] _snake_case : Optional[int] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = backbone_config.get("model_type" ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(lowerCamelCase ) __lowercase = use_timm_backbone __lowercase = backbone_config __lowercase = num_channels __lowercase = num_queries __lowercase = d_model __lowercase = encoder_ffn_dim __lowercase = encoder_layers __lowercase = encoder_attention_heads __lowercase = decoder_ffn_dim __lowercase = decoder_layers __lowercase = decoder_attention_heads __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = activation_function __lowercase = init_std __lowercase = init_xavier_std __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = encoder_layers __lowercase = auxiliary_loss __lowercase = position_embedding_type __lowercase = backbone __lowercase = use_pretrained_backbone __lowercase = dilation # Hungarian matcher __lowercase = class_cost __lowercase = bbox_cost __lowercase = giou_cost # Loss coefficients __lowercase = mask_loss_coefficient __lowercase = dice_loss_coefficient __lowercase = cls_loss_coefficient __lowercase = bbox_loss_coefficient __lowercase = giou_loss_coefficient __lowercase = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return self.encoder_attention_heads @property def _snake_case ( self : str ): '''simple docstring''' return self.d_model def _snake_case ( self : int ): '''simple docstring''' __lowercase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output class _A ( _lowercase ): '''simple docstring''' _snake_case : Any = version.parse("""1.11""" ) @property def _snake_case ( self : Tuple ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _snake_case ( self : Any ): '''simple docstring''' return 1e-5 @property def _snake_case ( self : Optional[Any] ): '''simple docstring''' return 12
655
0
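The code field above tests the agent type wrappers from the experimental transformers.tools module. A hedged sketch of the round-trip those tests assert on, using only calls visible in the snippet (the module may be absent from current transformers releases, and soundfile is required):

import torch
from transformers.tools.agent_types import AgentAudio

waveform = torch.rand(12, dtype=torch.float32) - 0.5
agent_audio = AgentAudio(waveform)

path = agent_audio.to_string()  # serializes the audio to a temp file path
tensor = agent_audio.to_raw()   # recovers the tensor (up to I/O precision)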
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : Dict = { """weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""", } class _A ( _lowercase ): '''simple docstring''' _snake_case : List[str] = """roc_bert""" def __init__( self : int , lowerCamelCase : int=30_522 , lowerCamelCase : str=768 , lowerCamelCase : List[str]=12 , lowerCamelCase : str=12 , lowerCamelCase : Tuple=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Dict=512 , lowerCamelCase : Tuple=2 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Dict=1e-12 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Union[str, Any]=0 , lowerCamelCase : Dict="absolute" , lowerCamelCase : str=None , lowerCamelCase : Any=True , lowerCamelCase : List[Any]=True , lowerCamelCase : Optional[Any]=768 , lowerCamelCase : int=910 , lowerCamelCase : Union[str, Any]=512 , lowerCamelCase : Dict=24_858 , lowerCamelCase : Any=True , **lowerCamelCase : Dict , ): '''simple docstring''' __lowercase = vocab_size __lowercase = max_position_embeddings __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = initializer_range __lowercase = type_vocab_size __lowercase = layer_norm_eps __lowercase = use_cache __lowercase = enable_pronunciation __lowercase = enable_shape __lowercase = pronunciation_embed_dim __lowercase = pronunciation_vocab_size __lowercase = shape_embed_dim __lowercase = shape_vocab_size __lowercase = concat_input __lowercase = position_embedding_type __lowercase = classifier_dropout super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
700
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ : Any = logging.get_logger(__name__) class _A ( _lowercase , _lowercase ): '''simple docstring''' _snake_case : Dict = """maskformer-swin""" _snake_case : List[str] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ): '''simple docstring''' super().__init__(**lowerCamelCase ) __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = embed_dim __lowercase = depths __lowercase = len(lowerCamelCase ) __lowercase = num_heads __lowercase = window_size __lowercase = mlp_ratio __lowercase = qkv_bias __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = drop_path_rate __lowercase = hidden_act __lowercase = use_absolute_embeddings __lowercase = layer_norm_eps __lowercase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) ) __lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
655
0
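Both config classes in the record above, like the conditional DETR config earlier, follow the same PretrainedConfig pattern: store constructor kwargs as attributes, forward the rest to super().__init__, and serialize via to_dict. A minimal sketch with a hypothetical DemoConfig:

from transformers import PretrainedConfig

class DemoConfig(PretrainedConfig):
    # Hypothetical config, only to illustrate the pattern used above.
    model_type = "demo"

    def __init__(self, hidden_size=64, num_layers=2, **kwargs):
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        super().__init__(**kwargs)

config = DemoConfig(hidden_size=128)
as_dict = config.to_dict()                 # plain dict, includes model_type
restored = DemoConfig.from_dict(as_dict)   # round-trips cleanly
assert restored.hidden_size == 128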
import numpy as np snake_case__ : Tuple = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class _A : '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' __lowercase = np.array(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase , __lowercase = np.where(letter == self.SQUARE ) __lowercase = np.concatenate([indexa + 1, indexa + 1] ) return indexes def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ): '''simple docstring''' __lowercase = self.SQUARE[indexa - 1, indexa - 1] return letter def _snake_case ( self : int , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() __lowercase = message.replace(" " , "" ) __lowercase = message.replace("j" , "i" ) __lowercase = np.empty((2, len(lowerCamelCase )) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape(2 * len(lowerCamelCase ) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[numbers_index * 2] ) __lowercase = int(second_step[(numbers_index * 2) + 1] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = encoded_message + letter return encoded_message def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' __lowercase = message.lower() message.replace(" " , "" ) __lowercase = np.empty(2 * len(lowerCamelCase ) ) for letter_index in range(len(lowerCamelCase ) ): __lowercase = self.letter_to_numbers(message[letter_index] ) __lowercase = numbers[0] __lowercase = numbers[1] __lowercase = first_step.reshape((2, len(lowerCamelCase )) ) __lowercase = "" for numbers_index in range(len(lowerCamelCase ) ): __lowercase = int(second_step[0, numbers_index] ) __lowercase = int(second_step[1, numbers_index] ) __lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) __lowercase = decoded_message + letter return decoded_message
701
def snake_case_ ( _SCREAMING_SNAKE_CASE ): # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError("The given input must be positive" ) # get the generated string sequence __lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE ) # # convert them to integers for i in range(len(_SCREAMING_SNAKE_CASE ) ): __lowercase = int(sequence[i] , 2 ) return sequence def snake_case_ ( _SCREAMING_SNAKE_CASE ): # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __lowercase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __lowercase = gray_code_sequence_string(bit_count - 1 ) __lowercase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __lowercase = "0" + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __lowercase = "1" + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
655
0
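The gray code snippet above builds the sequence by reflect-and-prefix recursion; the same sequence has the closed form g = i XOR (i >> 1), which makes a compact cross-check:

def gray_codes(bit_count: int) -> list:
    # Closed-form Gray code: i ^ (i >> 1) for each i in [0, 2**bit_count).
    return [i ^ (i >> 1) for i in range(1 << bit_count)]

# For 2 bits the recursive construction yields "00", "01", "11", "10",
# i.e. the integers 0, 1, 3, 2.
assert gray_codes(2) == [0, 1, 3, 2]
print([format(g, "02b") for g in gray_codes(2)])  # ['00', '01', '11', '10']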
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu snake_case__ : Optional[int] = False class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _snake_case ( self : Any ): '''simple docstring''' return 12 @property def _snake_case ( self : Optional[int] ): '''simple docstring''' return 12 @property def _snake_case ( self : int ): '''simple docstring''' return 32 @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def _snake_case ( self : str ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(lowerCamelCase ) @property def _snake_case ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) __lowercase = 12 __lowercase = 12 __lowercase = { "attention_bias": True, "cross_attention_dim": 32, "attention_head_dim": height * width, "num_attention_heads": 1, "num_vector_embeds": self.num_embed, "num_embeds_ada_norm": self.num_embeds_ada_norm, "norm_num_groups": 32, "sample_size": width, "activation_fn": "geglu-approximate", } __lowercase = TransformeraDModel(**lowerCamelCase ) return model def _snake_case ( self : Any ): '''simple docstring''' __lowercase = "cpu" __lowercase = self.dummy_vqvae __lowercase = self.dummy_text_encoder __lowercase = self.dummy_tokenizer __lowercase = self.dummy_transformer __lowercase = VQDiffusionScheduler(self.num_embed ) __lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase ) __lowercase = VQDiffusionPipeline( vqvae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , transformer=lowerCamelCase , scheduler=lowerCamelCase , learned_classifier_free_sampling_embeddings=lowerCamelCase , ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = "teddy bear playing in the pool" __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 ) __lowercase = pipe([prompt] , generator=lowerCamelCase , num_inference_steps=2 , output_type="np" ) __lowercase = output.images __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 ) __lowercase = pipe( [prompt] , generator=lowerCamelCase , output_type="np" , return_dict=lowerCamelCase , num_inference_steps=2 )[0] __lowercase = image[0, -3:, -3:, -1] __lowercase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == 
(1, 24, 24, 3) __lowercase = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = "cpu" __lowercase = self.dummy_vqvae __lowercase = self.dummy_text_encoder __lowercase = self.dummy_tokenizer __lowercase = self.dummy_transformer __lowercase = VQDiffusionScheduler(self.num_embed ) __lowercase = LearnedClassifierFreeSamplingEmbeddings( learnable=lowerCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __lowercase = VQDiffusionPipeline( vqvae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , transformer=lowerCamelCase , scheduler=lowerCamelCase , learned_classifier_free_sampling_embeddings=lowerCamelCase , ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = "teddy bear playing in the pool" __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 ) __lowercase = pipe([prompt] , generator=lowerCamelCase , num_inference_steps=2 , output_type="np" ) __lowercase = output.images __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 ) __lowercase = pipe( [prompt] , generator=lowerCamelCase , output_type="np" , return_dict=lowerCamelCase , num_inference_steps=2 )[0] __lowercase = image[0, -3:, -3:, -1] __lowercase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowercase = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) __lowercase = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) __lowercase = pipeline.to(lowerCamelCase ) pipeline.set_progress_bar_config(disable=lowerCamelCase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 ) __lowercase = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=lowerCamelCase , output_type="np" , ) __lowercase = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
702
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ): model.train() __lowercase = model(_SCREAMING_SNAKE_CASE ) __lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): set_seed(4_2 ) __lowercase = RegressionModel() __lowercase = deepcopy(_SCREAMING_SNAKE_CASE ) __lowercase = RegressionDataset(length=8_0 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) model.to(accelerator.device ) if sched: __lowercase = AdamW(params=model.parameters() , lr=1E-3 ) __lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 ) __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 ) __lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 ) # Make a copy of `model` if sched: __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def snake_case_ ( _SCREAMING_SNAKE_CASE ): # Test when on a single CPU or GPU that the context manager does nothing __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) # Use a single batch __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] def snake_case_ ( _SCREAMING_SNAKE_CASE ): # Test on distributed setup that context manager behaves properly __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) # Use a single batch __lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: # Sync grads step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ): __lowercase = Accelerator( split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = batch.values() # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_SCREAMING_SNAKE_CASE 
): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) __lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )] GradientState._reset_state() def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ): __lowercase = Accelerator( split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase , __lowercase = batch.values() # Gather the distributed inputs and targs for the base model __lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) ) __lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_SCREAMING_SNAKE_CASE ): step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n""" __lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )) if accelerator.num_processes > 1: check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def snake_case_ ( ): __lowercase = Accelerator() __lowercase = RegressionDataset(length=8_0 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) __lowercase = RegressionDataset(length=9_6 ) __lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 ) __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE ) if iteration < len(_SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ): assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE ) if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def snake_case_ ( ): __lowercase = Accelerator() __lowercase = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(_SCREAMING_SNAKE_CASE ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(_SCREAMING_SNAKE_CASE ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ : List[str] = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } snake_case__ : Dict = logging.get_logger(__name__) class _A ( _lowercase ): '''simple docstring''' _snake_case : Optional[Any] = """mask2former""" _snake_case : Union[str, Any] = ["""swin"""] _snake_case : Union[str, Any] = {"""hidden_size""": """hidden_dim"""} def __init__( self : Tuple , lowerCamelCase : Optional[Dict] = None , lowerCamelCase : int = 256 , lowerCamelCase : int = 256 , lowerCamelCase : int = 256 , lowerCamelCase : int = 1_024 , lowerCamelCase : str = "relu" , lowerCamelCase : int = 6 , lowerCamelCase : int = 10 , lowerCamelCase : int = 8 , lowerCamelCase : float = 0.0 , lowerCamelCase : int = 2_048 , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : int = 4 , lowerCamelCase : int = 255 , lowerCamelCase : int = 100 , lowerCamelCase : float = 0.1 , lowerCamelCase : float = 2.0 , lowerCamelCase : float = 5.0 , lowerCamelCase : float = 5.0 , lowerCamelCase : int = 12_544 , lowerCamelCase : float = 3.0 , lowerCamelCase : float = 0.75 , lowerCamelCase : float = 0.02 , lowerCamelCase : float = 1.0 , lowerCamelCase : bool = True , lowerCamelCase : List[int] = [4, 8, 16, 32] , lowerCamelCase : bool = None , **lowerCamelCase : List[Any] , ): '''simple docstring''' if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." ) __lowercase = CONFIG_MAPPING["swin"]( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = backbone_config.pop("model_type" ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(lowerCamelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {','.join(self.backbones_supported )}""" ) __lowercase = backbone_config __lowercase = feature_size __lowercase = mask_feature_size __lowercase = hidden_dim __lowercase = encoder_feedforward_dim __lowercase = activation_function __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = num_attention_heads __lowercase = dropout __lowercase = dim_feedforward __lowercase = pre_norm __lowercase = enforce_input_projection __lowercase = common_stride __lowercase = ignore_value __lowercase = num_queries __lowercase = no_object_weight __lowercase = class_weight __lowercase = mask_weight __lowercase = dice_weight __lowercase = train_num_points __lowercase = oversample_ratio __lowercase = importance_sample_ratio __lowercase = init_std __lowercase = init_xavier_std __lowercase = use_auxiliary_loss __lowercase = feature_strides __lowercase = output_auxiliary_logits __lowercase = decoder_layers super().__init__(**lowerCamelCase ) @classmethod def _snake_case ( cls : Optional[int] , lowerCamelCase : PretrainedConfig , **lowerCamelCase : List[Any] ): '''simple docstring''' return cls( backbone_config=lowerCamelCase , **lowerCamelCase , ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output
from ....utils import logging


snake_case__ : List[Any] = logging.get_logger(__name__)


class _A ( _lowercase ):
    '''simple docstring'''

    def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
        '''simple docstring'''
        __lowercase = config.__dict__
        __lowercase = modal_hidden_size
        if num_labels:
            __lowercase = num_labels
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case__ : Optional[int] = logging.get_logger(__name__)

snake_case__ : Optional[Any] = {
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}


class _A ( _lowercase ):
    '''simple docstring'''

    _snake_case : List[Any] = """gptsan-japanese"""
    _snake_case : int = [
        """past_key_values""",
    ]
    _snake_case : str = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self : str , lowerCamelCase : Optional[int]=36_000 , lowerCamelCase : Optional[Any]=1_280 , lowerCamelCase : Dict=1_024 , lowerCamelCase : Optional[int]=8_192 , lowerCamelCase : Union[str, Any]=4_096 , lowerCamelCase : List[Any]=128 , lowerCamelCase : int=10 , lowerCamelCase : Union[str, Any]=0 , lowerCamelCase : Optional[int]=16 , lowerCamelCase : Union[str, Any]=16 , lowerCamelCase : Any=128 , lowerCamelCase : Tuple=0.0 , lowerCamelCase : List[str]=1e-5 , lowerCamelCase : Optional[Any]=False , lowerCamelCase : int=0.0 , lowerCamelCase : str="float32" , lowerCamelCase : Optional[int]=False , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=0.002 , lowerCamelCase : int=False , lowerCamelCase : List[Any]=True , lowerCamelCase : str=35_998 , lowerCamelCase : Dict=35_995 , lowerCamelCase : str=35_999 , **lowerCamelCase : int , ):
        '''simple docstring'''
        __lowercase = vocab_size
        __lowercase = max_position_embeddings
        __lowercase = d_model
        __lowercase = d_ff
        __lowercase = d_ext
        __lowercase = d_spout
        __lowercase = num_switch_layers
        __lowercase = num_ext_layers
        __lowercase = num_switch_layers + num_ext_layers
        __lowercase = num_heads
        __lowercase = num_experts
        __lowercase = expert_capacity
        __lowercase = dropout_rate
        __lowercase = layer_norm_epsilon
        __lowercase = router_bias
        __lowercase = router_jitter_noise
        __lowercase = router_dtype
        __lowercase = router_ignore_padding_tokens
        __lowercase = output_hidden_states
        __lowercase = output_attentions
        __lowercase = initializer_factor
        __lowercase = output_router_logits
        __lowercase = use_cache
        super().__init__(
            separator_token_id=lowerCamelCase , pad_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case : Dict = StableUnCLIPImgaImgPipeline _snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : int = frozenset([] ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = 32 __lowercase = embedder_hidden_size # image encoding components __lowercase = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __lowercase = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowercase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __lowercase = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __lowercase = AutoencoderKL() __lowercase = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, 
"text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' if str(lowerCamelCase ).startswith("mps" ): __lowercase = torch.manual_seed(lowerCamelCase ) else: __lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __lowercase = input_image * 0.5 + 0.5 __lowercase = input_image.clamp(0 , 1 ) __lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __lowercase = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __lowercase = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __lowercase = sd_pipe(**lowerCamelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _snake_case ( self : str ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Any ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , 
lowerCamelCase ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __lowercase = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowercase = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __lowercase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ):
    __lowercase = None
    if token is not None:
        __lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    __lowercase = "636036"

    __lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    __lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()

    return result["workflow_runs"]


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    __lowercase = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    __lowercase = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            __lowercase = workflow_run["id"]
            break
    return workflow_run_id


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    __lowercase = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    if workflow_run_id is not None:
        __lowercase = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                __lowercase = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    __lowercase = {}
    for artifact_name in artifact_names:
        __lowercase = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
        if os.path.isfile(_SCREAMING_SNAKE_CASE ):
            __lowercase = {}
            with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
                        # read the file
                        with z.open(_SCREAMING_SNAKE_CASE ) as f:
                            __lowercase = f.read().decode("UTF-8" )

    return results
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class _A ( _lowercase , _lowercase ):
    '''simple docstring'''

    @register_to_config
    def __init__( self : Optional[Any] , *, lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ):
        '''simple docstring'''
        super().__init__()

        __lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) )

        # parameters for additional clip time embeddings
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )

        # parameters for encoder hidden states
        __lowercase = clip_extra_context_tokens
        __lowercase = nn.Linear(
            lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
        __lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
        __lowercase = nn.LayerNorm(lowerCamelCase )

    def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            __lowercase = image_embeddings.shape[0]
            __lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            __lowercase = classifier_free_guidance_embeddings.expand(
                lowerCamelCase , -1 )
            __lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        __lowercase = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        __lowercase = self.embedding_proj(lowerCamelCase )
        __lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase )
        __lowercase = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        __lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase )
        __lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens )
        __lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 )

        __lowercase = self.encoder_hidden_states_proj(lowerCamelCase )
        __lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase )
        __lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )

        return text_encoder_hidden_states, additive_clip_time_embeddings
import random

from .binary_exp_mod import bin_exp_mod


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1_0_0_0 ):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    __lowercase = n - 1
    __lowercase = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1=d*(2**exp)
    __lowercase = 0
    while count < prec:
        __lowercase = random.randint(2 , n - 1 )
        __lowercase = bin_exp_mod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        if b != 1:
            __lowercase = True
            for _ in range(_SCREAMING_SNAKE_CASE ):
                if b == n - 1:
                    __lowercase = False
                    break
                __lowercase = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    snake_case__ : Dict = abs(int(input("""Enter bound : """).strip()))
    print("""Here's the list of primes:""")
    print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
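# The file above is a Miller-Rabin-style probabilistic primality test; the
# imported `bin_exp_mod` helper is assumed to compute (a ** d) % n by binary
# exponentiation. A self-contained sketch of the same idea, with descriptive
# names and Python's built-in three-argument pow() standing in for the helper:
import random


def is_probably_prime(n: int, rounds: int = 1000) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # Write n - 1 as d * 2**exp with d odd.
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    for _ in range(rounds):
        witness = random.randint(2, n - 1)
        b = pow(witness, d, n)
        if b != 1:
            composite = True
            for _ in range(exp):
                if b == n - 1:
                    composite = False
                    break
                b = b * b % n
            if composite:
                return False
    return True


print([i for i in range(30) if is_probably_prime(i)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]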
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar snake_case__ : Union[str, Any] = TypeVar("""T""") snake_case__ : Optional[int] = TypeVar("""U""") class _A ( Generic[T, U] ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ): '''simple docstring''' __lowercase = key __lowercase = val __lowercase = None __lowercase = None def __repr__( self : Any ): '''simple docstring''' return ( f"""Node: key: {self.key}, val: {self.val}, """ f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}""" ) class _A ( Generic[T, U] ): '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) __lowercase , __lowercase = self.rear, self.head def __repr__( self : Optional[Any] ): '''simple docstring''' __lowercase = ["DoubleLinkedList"] __lowercase = self.head while node.next is not None: rep.append(str(lowerCamelCase ) ) __lowercase = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' __lowercase = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None __lowercase = node __lowercase = previous __lowercase = node __lowercase = self.rear def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' if node.prev is None or node.next is None: return None __lowercase = node.next __lowercase = node.prev __lowercase = None __lowercase = None return node class _A ( Generic[T, U] ): '''simple docstring''' _snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self : List[Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = DoubleLinkedList() __lowercase = capacity __lowercase = 0 __lowercase = 0 __lowercase = 0 __lowercase = {} def __repr__( self : Optional[Any] ): '''simple docstring''' return ( f"""CacheInfo(hits={self.hits}, misses={self.miss}, """ f"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self : Dict , lowerCamelCase : T ): '''simple docstring''' return key in self.cache def _snake_case ( self : List[Any] , lowerCamelCase : T ): '''simple docstring''' if key in self.cache: self.hits += 1 __lowercase = self.cache[key] __lowercase = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowerCamelCase ) return node.val self.miss += 1 return None def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity __lowercase = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowerCamelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 __lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value __lowercase = 
self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list __lowercase = value self.list.add(lowerCamelCase ) @classmethod def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ): '''simple docstring''' def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*lowerCamelCase : T ) -> U: if func not in cls.decorator_function_to_instance_map: __lowercase = LRUCache(lowerCamelCase ) __lowercase = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: __lowercase = func(*lowerCamelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
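# Hedged usage sketch for the cache above. Assumptions: the class's original
# name is LRUCache and the classmethod at the end was named `decorator` (as its
# `cache_decorator_*` helpers suggest); `cache_info` is attached to the wrapped
# function and returns the backing cache, whose __repr__ prints hit/miss stats.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(20))           # 6765
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)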
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated snake_case__ : Optional[int] = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ snake_case__ : str = """https://storage.googleapis.com/cvdf-datasets/mnist/""" def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = numpy.dtype(numpy.uintaa ).newbyteorder(">" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_SCREAMING_SNAKE_CASE )[0] @deprecated(_SCREAMING_SNAKE_CASE , "Please use tf.data to implement this functionality." ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): print("Extracting" , f.name ) with gzip.GzipFile(fileobj=_SCREAMING_SNAKE_CASE ) as bytestream: __lowercase = _readaa(_SCREAMING_SNAKE_CASE ) if magic != 2_0_5_1: raise ValueError( "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) ) __lowercase = _readaa(_SCREAMING_SNAKE_CASE ) __lowercase = _readaa(_SCREAMING_SNAKE_CASE ) __lowercase = _readaa(_SCREAMING_SNAKE_CASE ) __lowercase = bytestream.read(rows * cols * num_images ) __lowercase = numpy.frombuffer(_SCREAMING_SNAKE_CASE , dtype=numpy.uinta ) __lowercase = data.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 ) return data @deprecated(_SCREAMING_SNAKE_CASE , "Please use tf.one_hot on tensors." ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = labels_dense.shape[0] __lowercase = numpy.arange(_SCREAMING_SNAKE_CASE ) * num_classes __lowercase = numpy.zeros((num_labels, num_classes) ) __lowercase = 1 return labels_one_hot @deprecated(_SCREAMING_SNAKE_CASE , "Please use tf.data to implement this functionality." ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1_0 ): print("Extracting" , f.name ) with gzip.GzipFile(fileobj=_SCREAMING_SNAKE_CASE ) as bytestream: __lowercase = _readaa(_SCREAMING_SNAKE_CASE ) if magic != 2_0_4_9: raise ValueError( "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) ) __lowercase = _readaa(_SCREAMING_SNAKE_CASE ) __lowercase = bytestream.read(_SCREAMING_SNAKE_CASE ) __lowercase = numpy.frombuffer(_SCREAMING_SNAKE_CASE , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return labels class _A : '''simple docstring''' @deprecated( lowerCamelCase , "Please use alternatives such as official/mnist/_DataSet.py" " from tensorflow/models." 
, ) def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any]=False , lowerCamelCase : str=False , lowerCamelCase : str=dtypes.floataa , lowerCamelCase : Optional[int]=True , lowerCamelCase : int=None , ): '''simple docstring''' __lowercase , __lowercase = random_seed.get_seed(lowerCamelCase ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowercase = dtypes.as_dtype(lowerCamelCase ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype ) if fake_data: __lowercase = 10_000 __lowercase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f"""images.shape: {images.shape} labels.shape: {labels.shape}""" __lowercase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowercase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowercase = images.astype(numpy.floataa ) __lowercase = numpy.multiply(lowerCamelCase , 1.0 / 255.0 ) __lowercase = images __lowercase = labels __lowercase = 0 __lowercase = 0 @property def _snake_case ( self : Union[str, Any] ): '''simple docstring''' return self._images @property def _snake_case ( self : List[str] ): '''simple docstring''' return self._labels @property def _snake_case ( self : Tuple ): '''simple docstring''' return self._num_examples @property def _snake_case ( self : List[str] ): '''simple docstring''' return self._epochs_completed def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=True ): '''simple docstring''' if fake_data: __lowercase = [1] * 784 __lowercase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(lowerCamelCase )], [fake_label for _ in range(lowerCamelCase )], ) __lowercase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowercase = numpy.arange(self._num_examples ) numpy.random.shuffle(lowerCamelCase ) __lowercase = self.images[perma] __lowercase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowercase = self._num_examples - start __lowercase = self._images[start : self._num_examples] __lowercase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowercase = numpy.arange(self._num_examples ) numpy.random.shuffle(lowerCamelCase ) __lowercase = self.images[perm] __lowercase = self.labels[perm] # Start next epoch __lowercase = 0 __lowercase = batch_size - rest_num_examples __lowercase = self._index_in_epoch __lowercase = self._images[start:end] __lowercase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowercase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_SCREAMING_SNAKE_CASE , "Please write your own downloading logic." 
) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if not gfile.Exists(_SCREAMING_SNAKE_CASE ): gfile.MakeDirs(_SCREAMING_SNAKE_CASE ) __lowercase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not gfile.Exists(_SCREAMING_SNAKE_CASE ): urllib.request.urlretrieve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # noqa: S310 with gfile.GFile(_SCREAMING_SNAKE_CASE ) as f: __lowercase = f.size() print("Successfully downloaded" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "bytes." ) return filepath @deprecated( _SCREAMING_SNAKE_CASE , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=dtypes.floataa , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=5_0_0_0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_SCREAMING_SNAKE_CASE , one_hot=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE ) __lowercase = fake() __lowercase = fake() __lowercase = fake() return _Datasets(train=_SCREAMING_SNAKE_CASE , validation=_SCREAMING_SNAKE_CASE , test=_SCREAMING_SNAKE_CASE ) if not source_url: # empty string check __lowercase = DEFAULT_SOURCE_URL __lowercase = "train-images-idx3-ubyte.gz" __lowercase = "train-labels-idx1-ubyte.gz" __lowercase = "t10k-images-idx3-ubyte.gz" __lowercase = "t10k-labels-idx1-ubyte.gz" __lowercase = _maybe_download( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + train_images_file ) with gfile.Open(_SCREAMING_SNAKE_CASE , "rb" ) as f: __lowercase = _extract_images(_SCREAMING_SNAKE_CASE ) __lowercase = _maybe_download( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + train_labels_file ) with gfile.Open(_SCREAMING_SNAKE_CASE , "rb" ) as f: __lowercase = _extract_labels(_SCREAMING_SNAKE_CASE , one_hot=_SCREAMING_SNAKE_CASE ) __lowercase = _maybe_download( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + test_images_file ) with gfile.Open(_SCREAMING_SNAKE_CASE , "rb" ) as f: __lowercase = _extract_images(_SCREAMING_SNAKE_CASE ) __lowercase = _maybe_download( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + test_labels_file ) with gfile.Open(_SCREAMING_SNAKE_CASE , "rb" ) as f: __lowercase = _extract_labels(_SCREAMING_SNAKE_CASE , one_hot=_SCREAMING_SNAKE_CASE ) if not 0 <= validation_size <= len(_SCREAMING_SNAKE_CASE ): __lowercase = ( "Validation size should be between 0 and " F"""{len(_SCREAMING_SNAKE_CASE )}. Received: {validation_size}.""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) __lowercase = train_images[:validation_size] __lowercase = train_labels[:validation_size] __lowercase = train_images[validation_size:] __lowercase = train_labels[validation_size:] __lowercase = {"dtype": dtype, "reshape": reshape, "seed": seed} __lowercase = _DataSet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowercase = _DataSet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowercase = _DataSet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return _Datasets(train=_SCREAMING_SNAKE_CASE , validation=_SCREAMING_SNAKE_CASE , test=_SCREAMING_SNAKE_CASE )
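# The first helper in the MNIST file above reads a big-endian uint32 from an
# IDX-format stream (MNIST stores its magic numbers and dimensions big-endian).
# A self-contained spot check of that trick (names are mine):
import io

import numpy


def read_uint32_be(stream) -> int:
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(stream.read(4), dtype=dt)[0]


print(read_uint32_be(io.BytesIO(b"\x00\x00\x08\x03")))  # 2051, the IDX image-file magic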
import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) snake_case__ : Optional[Any] = logging.getLogger() def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = "\n".join(_SCREAMING_SNAKE_CASE ) Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random""" snake_case__ : int = """sshleifer/bart-tiny-random""" snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart""" snake_case__ : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _A ( _lowercase ): '''simple docstring''' def _snake_case ( self : str , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(lowerCamelCase , lowerCamelCase ) __lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split() with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): run_generate() assert Path(lowerCamelCase ).exists() # os.remove(Path(output_file_name)) def _snake_case ( self : Dict ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : str ): '''simple docstring''' self.run_eval_tester(lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" __lowercase = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() __lowercase = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } __lowercase = Path(self.get_auto_remove_tmp_dir() ) __lowercase = str(tmp_dir / "scores.json" ) __lowercase = str(tmp_dir / "val.target" ) _dump_articles(lowerCamelCase , text["en"] ) _dump_articles(lowerCamelCase , text["de"] ) __lowercase = "translation_en_to_de" if model == T5_TINY else "summarization" __lowercase = f""" run_eval_search.py {model} {str(lowerCamelCase )} {str(lowerCamelCase )} --score_path {score_path} --reference_path {reference_path} --task {task} """.split() testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] ) with patch.object(lowerCamelCase , "argv" , lowerCamelCase ): with CaptureStdout() as cs: run_search() __lowercase = [" num_beams | length_penalty", model, "Best score args"] __lowercase = ["Info"] if "translation" in task: expected_strings.append("bleu" ) else: 
expected_strings.extend(lowerCamelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowerCamelCase ).exists() os.remove(Path(lowerCamelCase ) )
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1E-1_2 ):
    __lowercase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
    __lowercase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
    return jnp.matmul(_SCREAMING_SNAKE_CASE , norm_emb_a.T )


class _A ( nn.Module ):
    '''simple docstring'''

    _snake_case : CLIPConfig
    _snake_case : jnp.dtype = jnp.floataa

    def _snake_case ( self : Union[str, Any] ):
        '''simple docstring'''
        __lowercase = FlaxCLIPVisionModule(self.config.vision_config )
        __lowercase = nn.Dense(self.config.projection_dim , use_bias=lowerCamelCase , dtype=self.dtype )

        __lowercase = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        __lowercase = self.param(
            "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )

        __lowercase = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
        __lowercase = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )

    def __call__( self : str , lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        __lowercase = self.vision_model(lowerCamelCase )[1]
        __lowercase = self.visual_projection(lowerCamelCase )

        __lowercase = jax_cosine_distance(lowerCamelCase , self.special_care_embeds )
        __lowercase = jax_cosine_distance(lowerCamelCase , self.concept_embeds )

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        __lowercase = 0.0

        __lowercase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        __lowercase = jnp.round(lowerCamelCase , 3 )
        __lowercase = jnp.any(special_scores > 0 , axis=1 , keepdims=lowerCamelCase )
        # Use a lower threshold if an image has any special care concept
        __lowercase = is_special_care * 0.01

        __lowercase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        __lowercase = jnp.round(lowerCamelCase , 3 )
        __lowercase = jnp.any(concept_scores > 0 , axis=1 )

        return has_nsfw_concepts


class _A ( _lowercase ):
    '''simple docstring'''

    _snake_case : str = CLIPConfig
    _snake_case : Optional[int] = """clip_input"""
    _snake_case : Optional[Any] = FlaxStableDiffusionSafetyCheckerModule

    def __init__( self : Tuple , lowerCamelCase : CLIPConfig , lowerCamelCase : Optional[Tuple] = None , lowerCamelCase : int = 0 , lowerCamelCase : jnp.dtype = jnp.floataa , lowerCamelCase : bool = True , **lowerCamelCase : List[Any] , ):
        '''simple docstring'''
        if input_shape is None:
            __lowercase = (1, 224, 224, 3)
        __lowercase = self.module_class(config=lowerCamelCase , dtype=lowerCamelCase , **lowerCamelCase )
        super().__init__(lowerCamelCase , lowerCamelCase , input_shape=lowerCamelCase , seed=lowerCamelCase , dtype=lowerCamelCase , _do_init=_do_init )

    def _snake_case ( self : List[Any] , lowerCamelCase : jax.random.KeyArray , lowerCamelCase : Tuple , lowerCamelCase : FrozenDict = None ):
        '''simple docstring'''
        __lowercase = jax.random.normal(lowerCamelCase , lowerCamelCase )

        __lowercase , __lowercase = jax.random.split(lowerCamelCase )
        __lowercase = {"params": params_rng, "dropout": dropout_rng}

        __lowercase = self.module.init(lowerCamelCase , lowerCamelCase )["params"]

        return random_params

    def __call__( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : dict = None , ):
        '''simple docstring'''
        __lowercase = jnp.transpose(lowerCamelCase , (0, 2, 3, 1) )

        return self.module.apply(
            {"params": params or self.params} , jnp.array(lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
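# A self-contained restatement of the normalized-cosine helper at the top of
# the file above (descriptive names are mine), with a quick spot check: rows
# are L2-normalized before the matmul, so the result is plain cosine similarity.
import jax.numpy as jnp


def cosine_similarity(emb_1, emb_2, eps=1e-12):
    norm_1 = (emb_1.T / jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_2 = (emb_2.T / jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_1, norm_2.T)


print(cosine_similarity(jnp.array([[1.0, 0.0], [0.0, 2.0]]), jnp.array([[10.0, 0.0]])))
# [[1.]
#  [0.]]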
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class _A :
    '''simple docstring'''

    _snake_case : int
    _snake_case : TreeNode | None = None
    _snake_case : TreeNode | None = None


snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    if root is None:
        return 0

    # Validation
    def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        __lowercase , __lowercase = get_distrib(node.left )
        __lowercase , __lowercase = get_distrib(node.right )

        __lowercase = 1 - left_distrib_excess
        __lowercase = 1 - right_distrib_excess

        __lowercase = (
            left_distrib_moves
            + right_distrib_moves
            + abs(_SCREAMING_SNAKE_CASE )
            + abs(_SCREAMING_SNAKE_CASE )
        )
        __lowercase = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    return get_distrib(_SCREAMING_SNAKE_CASE )[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
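# The recursion above is the classic "distribute coins in a binary tree" trick:
# each subtree reports its coin excess, and every unit of excess crossing an
# edge costs one move. A tiny self-contained restatement of the idea (the
# field names data/left/right are assumed from the dataclass's original form):
from dataclasses import dataclass
from typing import Optional


@dataclass
class Node:
    data: int
    left: "Node | None" = None
    right: "Node | None" = None


def distribute_coins(node: Optional[Node]) -> tuple[int, int]:
    """Return (moves, excess coins) for the subtree rooted at `node`."""
    if node is None:
        return 0, 0
    left_moves, left_excess = distribute_coins(node.left)
    right_moves, right_excess = distribute_coins(node.right)
    # Every excess coin (positive or negative) crossing an edge is one move.
    moves = left_moves + right_moves + abs(left_excess) + abs(right_excess)
    return moves, node.data - 1 + left_excess + right_excess


#   0
#  / \
# 0   3   -> three moves: two coins up from the right leaf, one down to the left
print(distribute_coins(Node(0, Node(0), Node(3)))[0])  # 3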
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class _A ( _lowercase ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = SMALL_MODEL_IDENTIFIER __lowercase = "pt" __lowercase = "tf" def _snake_case ( self : Union[str, Any] , lowerCamelCase : Any ): '''simple docstring''' __lowercase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(lowerCamelCase ) def _snake_case ( self : int , lowerCamelCase : List[Any] ): '''simple docstring''' __lowercase = TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCamelCase ) model_tf.save_pretrained(lowerCamelCase ) def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = "mock_framework" # Framework provided - return whatever the user provides __lowercase = FeaturesManager.determine_framework(self.test_model , lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(lowerCamelCase ) __lowercase = FeaturesManager.determine_framework(lowerCamelCase , lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(lowerCamelCase ) __lowercase = FeaturesManager.determine_framework(lowerCamelCase , lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase ) def _snake_case ( self : Dict ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(lowerCamelCase ) __lowercase = FeaturesManager.determine_framework(lowerCamelCase ) self.assertEqual(lowerCamelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(lowerCamelCase ) __lowercase = FeaturesManager.determine_framework(lowerCamelCase ) self.assertEqual(lowerCamelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(lowerCamelCase ): __lowercase = FeaturesManager.determine_framework(lowerCamelCase ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = MagicMock(return_value=lowerCamelCase ) with patch("transformers.onnx.features.is_tf_available" , lowerCamelCase ): __lowercase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(lowerCamelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __lowercase = MagicMock(return_value=lowerCamelCase ) with patch("transformers.onnx.features.is_torch_available" , lowerCamelCase ): __lowercase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(lowerCamelCase , self.framework_tf ) # Both in environment -> use PyTorch __lowercase = MagicMock(return_value=lowerCamelCase ) __lowercase = MagicMock(return_value=lowerCamelCase ) with patch("transformers.onnx.features.is_tf_available" , lowerCamelCase ), patch( "transformers.onnx.features.is_torch_available" , lowerCamelCase ): __lowercase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(lowerCamelCase , self.framework_pt ) # Both not in environment -> raise error __lowercase = MagicMock(return_value=lowerCamelCase ) __lowercase = 
MagicMock(return_value=lowerCamelCase ) with patch("transformers.onnx.features.is_tf_available" , lowerCamelCase ), patch( "transformers.onnx.features.is_torch_available" , lowerCamelCase ): with self.assertRaises(lowerCamelCase ): __lowercase = FeaturesManager.determine_framework(self.test_model )
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = SwinvaConfig() __lowercase = swinva_name.split("_" ) __lowercase = name_split[1] if "to" in name_split[3]: __lowercase = int(name_split[3][-3:] ) else: __lowercase = int(name_split[3] ) if "to" in name_split[2]: __lowercase = int(name_split[2][-2:] ) else: __lowercase = int(name_split[2][6:] ) if model_size == "tiny": __lowercase = 9_6 __lowercase = (2, 2, 6, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "small": __lowercase = 9_6 __lowercase = (2, 2, 1_8, 2) __lowercase = (3, 6, 1_2, 2_4) elif model_size == "base": __lowercase = 1_2_8 __lowercase = (2, 2, 1_8, 2) __lowercase = (4, 8, 1_6, 3_2) else: __lowercase = 1_9_2 __lowercase = (2, 2, 1_8, 2) __lowercase = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: __lowercase = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __lowercase = 2_1_8_4_1 __lowercase = "huggingface/label-files" __lowercase = "imagenet-22k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} else: __lowercase = 1_0_0_0 __lowercase = "huggingface/label-files" __lowercase = "imagenet-1k-id2label.json" __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} __lowercase = img_size __lowercase = num_classes __lowercase = embed_dim __lowercase = depths __lowercase = num_heads __lowercase = window_size return config def snake_case_ ( _SCREAMING_SNAKE_CASE ): if "patch_embed.proj" in name: __lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __lowercase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __lowercase = "encoder." + name if "attn.proj" in name: __lowercase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __lowercase = name.replace("attn" , "attention.self" ) if "norm1" in name: __lowercase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __lowercase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __lowercase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __lowercase = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: __lowercase = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: __lowercase = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: __lowercase = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: __lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": __lowercase = "layernorm.weight" if name == "norm.bias": __lowercase = "layernorm.bias" if "head" in name: __lowercase = name.replace("head" , "classifier" ) else: __lowercase = "swinv2." 
+ name return name def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for key in orig_state_dict.copy().keys(): __lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: __lowercase = key.split("." ) __lowercase = int(key_split[1] ) __lowercase = int(key_split[3] ) __lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowercase = val[:dim, :] __lowercase = val[dim : dim * 2, :] __lowercase = val[-dim:, :] else: __lowercase = val[:dim] __lowercase = val[ dim : dim * 2 ] __lowercase = val[-dim:] else: __lowercase = val return orig_state_dict def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() __lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE ) __lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() __lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) __lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) __lowercase = timm_model(inputs["pixel_values"] ) __lowercase = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub( repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case__ : str = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
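# The qkv branch in convert_state_dict above carves a fused attention matrix
# into query/key/value thirds along dim 0. A tiny spot check of that slicing:
import torch

dim = 2
val = torch.arange(6 * dim).reshape(3 * dim, dim)  # fused qkv weight, shape (3*dim, dim)
q, k, v = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]
print(q.shape, k.shape, v.shape)  # torch.Size([2, 2]) three times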
'''simple docstring'''


def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(_SCREAMING_SNAKE_CASE ).count("1" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
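# Usage sketch (illustrative; downloads the `allenai/led-base-16384` checkpoint).
# `tokenizer.pad` routes through the `_pad` override above, so the shorter
# sequence's `global_attention_mask` is extended with -1 on the right.
from transformers import LEDTokenizer

led_tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = led_tokenizer(["short text", "a somewhat longer input text"])
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
padded = led_tokenizer.pad(enc, padding="longest")
print(padded["global_attention_mask"])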
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
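# Usage sketch (illustrative; paths and hyperparameters are placeholders):
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./swag_data \
#     --output_dir ./swag_output \
#     --do_train --do_eval \
#     --max_seq_length 80 --per_device_train_batch_size 16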
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # pick the greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # flag value: mark this ratio as already used

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
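# Usage sketch (illustrative): with capacity 10, the greedy strategy takes all of
# item 0 (weight 5, profit 10) and then 5/6 of item 1 (weight 6, profit 9).
assert calc_profit([10, 9, 8], [5, 6, 7], 10) == 10 + 5 / 6 * 9  # 17.5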
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image: pixels above the global mean become 255, all others 0."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
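# Usage sketch (illustrative): threshold a synthetic 4x4 grayscale ramp whose
# mean intensity works out to 120, so only values above 120 flip to 255.
ramp = Image.new("L", (4, 4))
ramp.putdata(list(range(0, 256, 16)))
print(list(mean_threshold(ramp).getdata()))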
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
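# Usage sketch (illustrative; triggers a checkpoint download and expects a
# 16 kHz mono waveform — one second of silence is used here as a stand-in):
import numpy as np

tool = SpeechToTextTool()
waveform = np.zeros(16_000, dtype=np.float32)
print(tool(waveform))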
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text to help Transformer-XL and XLNet with short prompts, as proposed by Aman Rusia
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(
            input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs
        )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
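# Usage sketch (illustrative; downloads a small checkpoint on first run).
# `return_full_text=False` selects ReturnType.NEW_TEXT, so only the newly
# generated continuation is returned.
from transformers import pipeline

generator = pipeline("text-generation", model="distilgpt2")
out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
print(out[0]["generated_text"])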
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
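# Usage sketch (illustrative wiring only; the real test classes also mix in
# PipelineTesterMixin and define richer parameter sets and dummy inputs):
import unittest

from diffusers import IFPipeline


class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.Generator(device="cpu").manual_seed(seed)
        return {
            "prompt": "a photograph of an astronaut riding a horse",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    def test_save_load_local(self):
        self._test_save_load_local()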
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
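# Usage sketch (illustrative): thanks to `_LazyModule`, submodules are only
# imported when an attribute is first accessed.
from transformers.models.albert import AlbertConfig

config = AlbertConfig(hidden_size=128, num_attention_heads=4)
print(config.hidden_size)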
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ('j' is folded into 'i')."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) coordinates in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write the coordinates of each letter into two rows...
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # ...then read them off row by row and convert coordinate pairs back to letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Recover the flat coordinate sequence from the ciphertext letters...
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # ...then fold it back into two rows and convert column pairs to letters.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
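# Usage sketch (illustrative): encoding then decoding is the identity for
# messages without 'j' (which the square folds into 'i').
cipher = BifidCipher()
ciphertext = cipher.encode("testmessage")
assert cipher.decode(ciphertext) == "testmessage"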
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
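# Usage sketch (illustrative): 20 cameras panning around the origin at 64x64;
# each pixel gets an (origin, direction) ray pair.
cameras = create_pan_cameras(64)
rays = cameras.camera_rays
print(rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3])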
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
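# Usage sketch (illustrative): keep the cursor hidden while drawing a spinner.
import time

with hide():
    for _ in range(3):
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(0.1)
print()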