Dataset columns:

  code                      string, length 87 to 55.2k
  code_codestyle            int64, 0 to 349
  style_context             string, length 135 to 49.1k
  style_context_codestyle   int64, 0 to 349
  label                     int64, 0 to 1
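The schema above describes a code-style classification dataset: each record pairs a code snippet (code) with a second snippet (style_context), an integer style id for each, and a binary label. As a minimal sketch of how a dataset with this schema could be inspected with the datasets library, one could write the following; the repository id is a placeholder, since the actual dataset path is not given anywhere in this dump.

from datasets import load_dataset

# Hypothetical repository id; substitute the real path of this dataset on the Hub.
dataset = load_dataset("user/code-codestyle-pairs", split="train")

# The columns match the schema listed above.
print(dataset.column_names)
# ['code', 'code_codestyle', 'style_context', 'style_context_codestyle', 'label']

first_row = dataset[0]
print(len(first_row["code"]), first_row["code_codestyle"], first_row["label"])

The records of the dump follow, reproduced field by field; each record's code field comes first.

code: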
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = StableDiffusionInpaintPipeline _a = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS _a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _a = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _a = frozenset([] ) def a__ ( self ) -> Union[str, Any]: torch.manual_seed(0 ) _A : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , ) _A : List[str] = PNDMScheduler(skip_prk_steps=_a ) torch.manual_seed(0 ) _A : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _A : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) _A : Any = CLIPTextModel(_a ) _A : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _A : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self , _a , _a=0 ) -> str: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched _A : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a ) _A : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] _A : Tuple = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) ) _A : Dict = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) ) if str(_a ).startswith("""mps""" ): _A : Optional[Any] = torch.manual_seed(_a ) else: _A : Dict = torch.Generator(device=_a ).manual_seed(_a ) _A : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self ) -> List[Any]: _A : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : List[str] = self.get_dummy_components() _A : int = StableDiffusionInpaintPipeline(**_a ) _A : Dict = sd_pipe.to(_a ) 
sd_pipe.set_progress_bar_config(disable=_a ) _A : Optional[Any] = self.get_dummy_inputs(_a ) _A : Optional[Any] = sd_pipe(**_a ).images _A : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A : Dict = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def a__ ( self ) -> List[str]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ) -> str: _A : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) _A : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) _A : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) _A : Union[str, Any] = """stabilityai/stable-diffusion-2-inpainting""" _A : Dict = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() _A : int = """Face of a yellow cat, high resolution, sitting on a park bench""" _A : List[Any] = torch.manual_seed(0 ) _A : str = pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , ) _A : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9e-3 def a__ ( self ) -> List[Any]: _A : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) _A : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) _A : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) _A : Optional[Any] = """stabilityai/stable-diffusion-2-inpainting""" _A : Tuple = StableDiffusionInpaintPipeline.from_pretrained( _a , torch_dtype=torch.floataa , safety_checker=_a , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() _A : List[str] = """Face of a yellow cat, high resolution, sitting on a park bench""" _A : Dict = torch.manual_seed(0 ) _A : Optional[int] = pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , ) _A : Any = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def a__ ( self ) -> List[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _A : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) _A : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) _A : str = """stabilityai/stable-diffusion-2-inpainting""" _A : List[str] = PNDMScheduler.from_pretrained(_a , subfolder="""scheduler""" ) _A : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _a , safety_checker=_a , scheduler=_a , 
torch_dtype=torch.floataa , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _A : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench""" _A : List[Any] = torch.manual_seed(0 ) _A : Optional[int] = pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type="""np""" , ) _A : Optional[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
code_codestyle: 26

style_context:
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
style_context_codestyle: 26
label: 1

code:
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        # Copy the template and align its input schema with the dataset's audio feature.
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
code_codestyle: 26

style_context:
def euclidean_gcd(a: int, b: int) -> int:
    # Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b).
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    # Recursive variant: gcd(a, 0) == a, otherwise recurse on (b, a % b).
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
style_context_codestyle: 26
label: 1

code:
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowercase ( UpperCamelCase__ ): @slow @require_torch def a__ ( self ) -> Dict: _A : int = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) _A : Tuple = BertTokenizer.from_pretrained("""bert-base-uncased""" ) _A : Dict = bertabert.config.encoder.vocab_size _A : List[str] = tokenizer.sep_token_id _A : Dict = tokenizer.cls_token_id _A : List[Any] = 128 _A : Tuple = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) _A : List[str] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) _A : Union[str, Any] = train_dataset.select(range(32 ) ) _A : str = val_dataset.select(range(16 ) ) _A : Dict = 4 def _map_to_encoder_decoder_inputs(_a ): # Tokenizer will automatically set [BOS] <text> [EOS] _A : List[Any] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_a , max_length=512 ) _A : int = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_a , max_length=128 ) _A : Union[str, Any] = inputs.input_ids _A : List[Any] = inputs.attention_mask _A : Dict = outputs.input_ids _A : str = outputs.input_ids.copy() _A : Union[str, Any] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] _A : Dict = outputs.attention_mask assert all(len(_a ) == 512 for x in inputs.input_ids ) assert all(len(_a ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(_a ): _A : int = pred.label_ids _A : str = pred.predictions # all unnecessary tokens are removed _A : Dict = tokenizer.batch_decode(_a , skip_special_tokens=_a ) _A : Optional[Any] = tokenizer.batch_decode(_a , skip_special_tokens=_a ) _A : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_a ) )] ) / len(_a ) return {"accuracy": accuracy} # map train dataset _A : Dict = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset _A : Optional[int] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) _A : Optional[int] = self.get_auto_remove_tmp_dir() _A : Any = SeqaSeqTrainingArguments( output_dir=_a , per_device_train_batch_size=_a , per_device_eval_batch_size=_a , predict_with_generate=_a , evaluation_strategy="""steps""" , do_train=_a , do_eval=_a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _A : Optional[Any] = SeqaSeqTrainer( model=_a , args=_a , compute_metrics=_compute_metrics , train_dataset=_a , eval_dataset=_a , tokenizer=_a , ) # start training trainer.train()
code_codestyle: 26

style_context:
def is_power_of_two(number: int) -> bool:
    # number & (number - 1) clears the lowest set bit; the result is 0 exactly
    # for powers of two (and, as written, also for 0).
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 26
label: 1

code:
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class lowercase : _a = 42 _a = None # Automatically constructed _a = "dict" _a = None _a = field(default="Translation",init=UpperCamelCase__,repr=UpperCamelCase__ ) def __call__( self ) -> int: return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class lowercase : _a = None _a = None _a = None # Automatically constructed _a = "dict" _a = None _a = field(default="TranslationVariableLanguages",init=UpperCamelCase__,repr=UpperCamelCase__ ) def a__ ( self ) -> str: _A : str = sorted(set(self.languages ) ) if self.languages else None _A : Any = len(self.languages ) if self.languages else None def __call__( self ) -> Union[str, Any]: return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def a__ ( self , _a ) -> Optional[int]: _A : Optional[Any] = set(self.languages ) if self.languages and set(_a ) - lang_set: raise ValueError( F'''Some languages in example ({", ".join(sorted(set(_a ) - lang_set ) )}) are not in valid set ({", ".join(_a )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. _A : Any = [] for lang, text in translation_dict.items(): if isinstance(_a , _a ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. _A , _A : str = zip(*sorted(_a ) ) return {"language": languages, "translation": translations} def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
code_codestyle: 26

style_context:
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
style_context_codestyle: 26
label: 1

code:
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def lowerCAmelCase_ ( snake_case_ ): _A , _A : Tuple = analyze_text(snake_case_ ) _A : List[str] = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. _A : Dict = sum(single_char_strings.values() ) # one length string _A : Union[str, Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: _A : List[str] = single_char_strings[ch] _A : Optional[Any] = my_str / all_sum my_fir_sum += prob * math.loga(snake_case_ ) # entropy formula. # print entropy print(f'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string _A : str = sum(two_char_strings.values() ) _A : Union[str, Any] = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: _A : Union[str, Any] = cha + cha if sequence in two_char_strings: _A : List[Any] = two_char_strings[sequence] _A : str = int(snake_case_ ) / all_sum my_sec_sum += prob * math.loga(snake_case_ ) # print second entropy print(f'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def lowerCAmelCase_ ( snake_case_ ): _A : List[str] = Counter() # type: ignore _A : Optional[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0,len(snake_case_ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def lowerCAmelCase_ ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
code_codestyle: 26

style_context:
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
style_context_codestyle: 26
label: 1

code:
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { "configuration_rag": ["RagConfig"], "retrieval_rag": ["RagRetriever"], "tokenization_rag": ["RagTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 26

style_context:
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class lowercase ( UpperCamelCase__ ): _a = "fnet" def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Any = vocab_size _A : str = max_position_embeddings _A : Optional[Any] = hidden_size _A : List[str] = num_hidden_layers _A : List[str] = intermediate_size _A : List[Any] = hidden_act _A : List[str] = hidden_dropout_prob _A : List[str] = initializer_range _A : List[Any] = type_vocab_size _A : List[Any] = layer_norm_eps _A : List[str] = use_tpu_fourier_optimizations _A : str = tpu_short_seq_length
style_context_codestyle: 26
label: 1

code:
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
code_codestyle: 26

style_context:
def harmonic_series(n_term: str) -> list:
    # Build the terms of the harmonic series 1 + 1/2 + 1/3 + ... + 1/n as strings.
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
style_context_codestyle: 26
label: 1

code:
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a__ ( self , _a=0 ) -> Optional[Any]: _A : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(_a ) ) _A : Optional[Any] = np.random.RandomState(_a ) _A : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def a__ ( self ) -> int: _A : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) _A : Any = self.get_dummy_inputs() _A : List[str] = pipe(**_a ).images _A : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) _A : List[Any] = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def a__ ( self ) -> Dict: _A : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a ) pipe.set_progress_bar_config(disable=_a ) _A : List[Any] = self.get_dummy_inputs() _A : Union[str, Any] = pipe(**_a ).images _A : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _A : Dict = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def a__ ( self ) -> List[Any]: _A : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) # warmup pass to apply optimizations _A : List[Any] = pipe(**self.get_dummy_inputs() ) _A : Tuple = self.get_dummy_inputs() _A : List[str] = pipe(**_a ).images _A : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _A : Dict = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def a__ ( self ) -> Union[str, Any]: _A : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) _A : int = self.get_dummy_inputs() _A : Tuple = pipe(**_a ).images _A : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _A : Any = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def a__ ( self ) -> List[Any]: _A : int = 
OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) _A : Optional[Any] = self.get_dummy_inputs() _A : Tuple = pipe(**_a ).images _A : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _A : str = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def a__ ( self ) -> List[str]: _A : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) _A : Union[str, Any] = self.get_dummy_inputs() _A : List[str] = pipe(**_a ).images _A : str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _A : Optional[int] = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowercase ( unittest.TestCase ): @property def a__ ( self ) -> str: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a__ ( self ) -> Optional[int]: _A : Union[str, Any] = ort.SessionOptions() _A : List[str] = False return options def a__ ( self ) -> Tuple: _A : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _A : Optional[Any] = init_image.resize((768, 512) ) # using the PNDM scheduler by default _A : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) _A : str = """A fantasy landscape, trending on artstation""" _A : List[str] = np.random.RandomState(0 ) _A : Dict = pipe( prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="""np""" , ) _A : Optional[Any] = output.images _A : Optional[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) _A : Optional[int] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def a__ ( self ) -> Optional[Any]: _A : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _A : Union[str, Any] = init_image.resize((768, 512) ) _A : Any = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) _A : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) _A : str = """A fantasy landscape, trending on artstation""" _A : int = np.random.RandomState(0 ) _A : Optional[Any] = pipe( prompt=_a , 
image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type="""np""" , ) _A : Tuple = output.images _A : List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) _A : List[Any] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
code_codestyle: 26

style_context:
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
style_context_codestyle: 26
label: 1

code:
from math import sqrt


def sum_of_divisors(n: int) -> int:
    # Sum of the proper divisors of n: pair each divisor i <= sqrt(n) with n // i,
    # then subtract n so the number itself is excluded.
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    # Sum of all amicable numbers below limit.
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
code_codestyle: 26

style_context:
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Return the area of the grid whose number of contained rectangles is
    closest to ``target`` (Project Euler problem 85)."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation element-wise: relu(x) = max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowercase ( unittest.TestCase ): def a__ ( self ) -> Union[str, Any]: _A : List[str] = 0 def a__ ( self ) -> str: _A : Union[str, Any] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(_a , _a ) def a__ ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: _A : Dict = Path(_a ) / """preprocessor_config.json""" _A : Union[str, Any] = Path(_a ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) ) _A : Optional[Any] = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def a__ ( self ) -> Optional[Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _A : List[str] = Path(_a ) / """preprocessor_config.json""" _A : str = Path(_a ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) ) _A : int = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def a__ ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdirname: _A : List[Any] = CLIPConfig() # Create a dummy config file with image_proceesor_type _A : Tuple = Path(_a ) / """preprocessor_config.json""" _A : Any = Path(_a ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _A : Optional[Any] = AutoImageProcessor.from_pretrained(_a ).to_dict() config_dict.pop("""image_processor_type""" ) _A : Any = CLIPImageProcessor(**_a ) # save in new folder model_config.save_pretrained(_a ) config.save_pretrained(_a ) _A : int = AutoImageProcessor.from_pretrained(_a ) # make sure private variable is not incorrectly saved _A : Optional[Any] = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(_a , _a ) def a__ ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: _A : Tuple = Path(_a ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , ) _A : List[str] = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def a__ ( self ) -> Tuple: with self.assertRaisesRegex( _a , """clip-base is not a local folder and is not a valid model identifier""" ): _A : Tuple = AutoImageProcessor.from_pretrained("""clip-base""" ) def a__ ( self ) -> Optional[Any]: with self.assertRaisesRegex( _a , R"""aaaaaa is not a valid git identifier \(branch name, tag 
name or commit id\)""" ): _A : List[Any] = AutoImageProcessor.from_pretrained(_a , revision="""aaaaaa""" ) def a__ ( self ) -> Optional[int]: with self.assertRaisesRegex( _a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): _A : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def a__ ( self ) -> Optional[Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_a ): _A : str = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(_a ): _A : int = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a ) _A : Tuple = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) _A : List[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def a__ ( self ) -> List[str]: try: AutoConfig.register("""custom""" , _a ) AutoImageProcessor.register(_a , _a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoImageProcessor.register(_a , _a ) with tempfile.TemporaryDirectory() as tmpdirname: _A : Dict = Path(_a ) / """preprocessor_config.json""" _A : Optional[Any] = Path(_a ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) ) _A : str = CustomImageProcessor.from_pretrained(_a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) _A : List[Any] = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> Optional[int]: class lowercase ( UpperCamelCase__ ): _a = True try: AutoConfig.register("""custom""" , _a ) AutoImageProcessor.register(_a , _a ) # If remote code is not set, the default is to use local _A : List[str] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
_A : Dict = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _A : str = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(_a , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:
    # Max heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel _snake_case = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class lowercase ( unittest.TestCase ): @classmethod def a__ ( cls ) -> Optional[int]: _A : Optional[int] = TOKEN HfFolder.save_token(_a ) @classmethod def a__ ( cls ) -> List[Any]: try: delete_repo(token=cls._token , repo_id="""test-model-flax""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" ) except HTTPError: pass def a__ ( self ) -> Dict: _A : Dict = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) _A : Dict = FlaxBertModel(_a ) model.push_to_hub("""test-model-flax""" , use_auth_token=self._token ) _A : Dict = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) _A : Optional[int] = flatten_dict(unfreeze(model.params ) ) _A : str = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): _A : Optional[int] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_a , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="""test-model-flax""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_a , repo_id="""test-model-flax""" , push_to_hub=_a , use_auth_token=self._token ) _A : str = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) _A : str = flatten_dict(unfreeze(model.params ) ) _A : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): _A : List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_a , 1e-3 , msg=F'''{key} not identical''' ) def a__ ( self ) -> Dict: _A : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) _A : List[str] = FlaxBertModel(_a ) model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token ) _A : Union[str, Any] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) _A : str = flatten_dict(unfreeze(model.params ) ) _A : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): _A : Optional[Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_a , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _a , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=_a , use_auth_token=self._token ) _A : Optional[Any] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) _A : List[Any] = flatten_dict(unfreeze(model.params ) ) _A : Optional[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): _A : Optional[int] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_a , 1e-3 , msg=F'''{key} not identical''' ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : List[Any] = True _A : Union[str, Any] = flatten_dict(modela.params 
) _A : Optional[Any] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: _A : List[str] = False return models_are_equal @require_flax class lowercase ( unittest.TestCase ): def a__ ( self ) -> Union[str, Any]: _A : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) _A : Optional[int] = FlaxBertModel(_a ) _A : List[str] = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_a , _a ) ) with self.assertRaises(_a ): _A : int = FlaxBertModel.from_pretrained(_a ) _A : Optional[Any] = FlaxBertModel.from_pretrained(_a , subfolder=_a ) self.assertTrue(check_models_equal(_a , _a ) ) def a__ ( self ) -> Tuple: _A : Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) _A : Any = FlaxBertModel(_a ) _A : Optional[Any] = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_a , _a ) , max_shard_size="""10KB""" ) with self.assertRaises(_a ): _A : Any = FlaxBertModel.from_pretrained(_a ) _A : List[str] = FlaxBertModel.from_pretrained(_a , subfolder=_a ) self.assertTrue(check_models_equal(_a , _a ) ) def a__ ( self ) -> Optional[Any]: _A : List[str] = """bert""" _A : List[Any] = """hf-internal-testing/tiny-random-bert-subfolder""" with self.assertRaises(_a ): _A : Dict = FlaxBertModel.from_pretrained(_a ) _A : Tuple = FlaxBertModel.from_pretrained(_a , subfolder=_a ) self.assertIsNotNone(_a ) def a__ ( self ) -> Union[str, Any]: _A : List[str] = """bert""" _A : str = """hf-internal-testing/tiny-random-bert-sharded-subfolder""" with self.assertRaises(_a ): _A : Tuple = FlaxBertModel.from_pretrained(_a ) _A : str = FlaxBertModel.from_pretrained(_a , subfolder=_a ) self.assertIsNotNone(_a )
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
import enum
import shutil
import sys


TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
26
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowercase ( UpperCamelCase__,UpperCamelCase__ ): _a = 1 @register_to_config def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]: _A : Dict = None _A : List[Any] = None _A : Dict = None def a__ ( self , _a , _a = None ) -> Union[str, Any]: _A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a ) def a__ ( self , _a , _a , _a , _a=None ) -> Dict: if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score _A : Any = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) _A : List[str] = std.flatten() while len(std.shape ) < len(score.shape ): _A : List[Any] = std.unsqueeze(-1 ) _A : int = -score / std # compute _A : Tuple = -1.0 / len(self.timesteps ) _A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _A : List[str] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): _A : Union[str, Any] = beta_t.unsqueeze(-1 ) _A : Tuple = -0.5 * beta_t * x _A : Tuple = torch.sqrt(_a ) _A : Dict = drift - diffusion**2 * score _A : Dict = x + drift * dt # add noise _A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype ) _A : str = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ) -> Optional[Any]: return self.config.num_train_timesteps
26
1
import math

def lowerCAmelCase_ ( snake_case_ = 100 ):
    _A : Optional[Any] = sum(i * i for i in range(1,n + 1 ) )
    _A : Optional[Any] = int(math.pow(sum(range(1,n + 1 ) ),2 ) )
    return square_of_sum - sum_of_squares

if __name__ == "__main__":
    print(f"""{solution() = }""")
26
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
26
1
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCAmelCase_ ( snake_case_ ): return np.dot(snake_case_,snake_case_ ) class lowercase : def __init__( self , *, _a = np.inf , _a = "linear" , _a = 0.0 , ) -> None: _A : List[Any] = regularization _A : Optional[int] = gamma if kernel == "linear": _A : List[str] = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("""rbf kernel requires gamma""" ) if not isinstance(self.gamma , (float, int) ): raise ValueError("""gamma must be float or int""" ) if not self.gamma > 0: raise ValueError("""gamma must be > 0""" ) _A : Optional[int] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: _A : Dict = F'''Unknown kernel: {kernel}''' raise ValueError(_a ) def a__ ( self , _a , _a ) -> float: return np.dot(_a , _a ) def a__ ( self , _a , _a ) -> float: return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def a__ ( self , _a , _a ) -> None: _A : List[Any] = observations _A : Optional[int] = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((_A) , ) : Union[str, Any] = np.shape(_a ) def to_minimize(_a ) -> float: _A : str = 0 ((_A) , ) : Optional[int] = np.shape(_a ) for i in range(_a ): for j in range(_a ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(_a ) _A : Optional[int] = LinearConstraint(_a , 0 , 0 ) _A : List[str] = Bounds(0 , self.regularization ) _A : Dict = minimize( _a , np.ones(_a ) , bounds=_a , constraints=[ly_contraint] ).x _A : Union[str, Any] = l_star # calculating mean offset of separation plane to points _A : Optional[int] = 0 for i in range(_a ): for j in range(_a ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) _A : str = s / n def a__ ( self , _a ) -> int: _A : Union[str, Any] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , _a ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
26
from math import asin, atan, cos, radians, sin, sqrt, tan

_snake_case = 6_3_7_8_1_3_7.0
_snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5
_snake_case = 6378137

def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
    _A : Any = (AXIS_A - AXIS_B) / AXIS_A
    _A : Optional[int] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
    _A : List[str] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
    _A : Optional[Any] = radians(snake_case_ )
    _A : str = radians(snake_case_ )
    # Equation
    _A : Dict = sin((phi_a - phi_a) / 2 )
    _A : List[str] = sin((lambda_a - lambda_a) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    _A : Optional[int] = sqrt(sin_sq_phi + (cos(snake_case_ ) * cos(snake_case_ ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(snake_case_ )

if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
1
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : str = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def lowerCAmelCase_ ( snake_case_,snake_case_ ): for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) _A : Tuple = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) _A : Tuple = in_proj_weight[ : encoder_config.hidden_size, : ] _A : Optional[int] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] _A : Dict = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : List[Any] = dct.pop(snake_case_ ) _A : Dict = val def lowerCAmelCase_ ( snake_case_ ): if "handwritten" in checkpoint_url: _A : Union[str, Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: _A : Dict = 
"""https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" _A : Union[str, Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ).convert("""RGB""" ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Any = ViTConfig(image_size=384,qkv_bias=snake_case_ ) _A : Tuple = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: _A : int = 768 elif "large" in checkpoint_url: # use ViT-large encoder _A : int = 1024 _A : Tuple = 4096 _A : Union[str, Any] = 24 _A : str = 16 _A : int = 1024 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: _A : Optional[Any] = False _A : Union[str, Any] = """relu""" _A : int = 1024 _A : Union[str, Any] = True _A : Union[str, Any] = False _A : Optional[Any] = False # load HuggingFace model _A : List[str] = ViTModel(snake_case_,add_pooling_layer=snake_case_ ) _A : List[str] = TrOCRForCausalLM(snake_case_ ) _A : Any = VisionEncoderDecoderModel(encoder=snake_case_,decoder=snake_case_ ) model.eval() # load state_dict of original model, rename some keys _A : Dict = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",check_hash=snake_case_ )["""model"""] _A : Optional[int] = create_rename_keys(snake_case_,snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) read_in_q_k_v(snake_case_,snake_case_ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): _A : List[str] = state_dict.pop(snake_case_ ) if key.startswith("""decoder""" ) and "output_projection" not in key: _A : Optional[int] = val else: _A : Dict = val # load state dict model.load_state_dict(snake_case_ ) # Check outputs on an image _A : List[Any] = ViTImageProcessor(size=encoder_config.image_size ) _A : List[str] = RobertaTokenizer.from_pretrained("""roberta-large""" ) _A : Dict = TrOCRProcessor(snake_case_,snake_case_ ) _A : Tuple = processor(images=prepare_img(snake_case_ ),return_tensors="""pt""" ).pixel_values # verify logits _A : Dict = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) _A : Optional[int] = model(pixel_values=snake_case_,decoder_input_ids=snake_case_ ) _A : int = outputs.logits _A : Optional[int] = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: _A : Dict = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: _A : Tuple = torch.tensor( [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: _A : int = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: _A : Optional[int] = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10],snake_case_,atol=1e-3 ), "First 
elements of logits not as expected" Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) _snake_case = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
26
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
26
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
26
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
26
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class lowercase ( UpperCamelCase__ ): _a = "camembert" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Any = vocab_size _A : List[Any] = hidden_size _A : Dict = num_hidden_layers _A : int = num_attention_heads _A : Dict = hidden_act _A : str = intermediate_size _A : List[Any] = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : int = max_position_embeddings _A : Tuple = type_vocab_size _A : List[str] = initializer_range _A : Dict = layer_norm_eps _A : str = position_embedding_type _A : Dict = use_cache _A : Any = classifier_dropout class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Optional[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
26
def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) if n == 0: return 0 _A : Tuple = float("""-inf""" ) for i in range(1,n + 1 ): _A : str = max( snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) ) return max_revue def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) _A : Dict = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _A : List[str] = float("""-inf""" ) for i in range(1,n + 1 ): _A : Optional[Any] = max( snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),) _A : Tuple = max_revenue return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _A : Any = 0 for i in range(1,n + 1 ): _A : Optional[Any] = max_rev[i] for j in range(1,i + 1 ): _A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] ) _A : int = max_revenue_i return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): if n < 0: _A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(snake_case_ ) if n > len(snake_case_ ): _A : Any = ( """Each integral piece of rod must have a corresponding price. """ f'''Got n = {n} but length of prices = {len(snake_case_ )}''' ) raise ValueError(snake_case_ ) def lowerCAmelCase_ ( ): _A : Tuple = [6, 10, 12, 15, 20, 23] _A : List[Any] = len(snake_case_ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _A : Any = 36 _A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ ) _A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ ) _A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
26
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "rwkv" _a = {"max_position_embeddings": "context_length"} def __init__( self , _a=5_0277 , _a=1024 , _a=4096 , _a=32 , _a=None , _a=None , _a=1e-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ) -> Dict: _A : Tuple = vocab_size _A : Dict = context_length _A : Optional[Any] = hidden_size _A : List[Any] = num_hidden_layers _A : Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size _A : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size _A : Union[str, Any] = layer_norm_epsilon _A : str = rescale_every _A : List[Any] = use_cache _A : int = bos_token_id _A : List[Any] = eos_token_id super().__init__( tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
26
import requests
from bsa import BeautifulSoup

def lowerCAmelCase_ ( snake_case_ = "AAPL" ):
    _A : str = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    _A : List[Any] = BeautifulSoup(requests.get(snake_case_ ).text,"""html.parser""" )
    _A : Union[str, Any] = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""",class_=class_ ).find("""span""" ).text

if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
26
1
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
26
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
26
1
def lowerCAmelCase_ ( snake_case_ ):
    if not isinstance(snake_case_,snake_case_ ):
        _A : Tuple = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(snake_case_ )
    if number < 0:
        return False
    _A : Union[str, Any] = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True

if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    while b:
        _A , _A : List[str] = b, a % b
    return a

def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    return a if b == 0 else euclidean_gcd_recursive(snake_case_,a % b )

def lowerCAmelCase_ ( ):
    print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3,5 )}''' )
    print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5,3 )}''' )
    print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1,3 )}''' )
    print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3,6 )}''' )
    print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6,3 )}''' )
    print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3,5 )}''' )
    print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5,3 )}''' )
    print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1,3 )}''' )
    print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3,6 )}''' )
    print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6,3 )}''' )

if __name__ == "__main__":
    main()
26
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"} class lowercase ( UpperCamelCase__ ): _a = "openai-gpt" _a = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _a=4_0478 , _a=512 , _a=768 , _a=12 , _a=12 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1e-5 , _a=0.02 , _a="cls_index" , _a=True , _a=None , _a=True , _a=0.1 , **_a , ) -> List[str]: _A : List[Any] = vocab_size _A : Tuple = n_positions _A : Union[str, Any] = n_embd _A : Union[str, Any] = n_layer _A : List[str] = n_head _A : int = afn _A : str = resid_pdrop _A : Optional[Any] = embd_pdrop _A : int = attn_pdrop _A : Dict = layer_norm_epsilon _A : int = initializer_range _A : int = summary_type _A : List[str] = summary_use_proj _A : Dict = summary_activation _A : Dict = summary_first_dropout _A : Any = summary_proj_to_labels super().__init__(**_a )
26
def lowerCAmelCase_ ( snake_case_ ):
    if number < 0:
        raise ValueError("""number must not be negative""" )
    return number & (number - 1) == 0

if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
1
def lowerCAmelCase_ ( snake_case_ ):
    _A : Any = len(snake_case_ )
    for i in range(1,snake_case_ ):
        _A : str = collection[i]
        _A : Optional[int] = 0
        _A : Tuple = i - 1
        while low <= high:
            _A : Any = (low + high) // 2
            if val < collection[mid]:
                _A : int = mid - 1
            else:
                _A : Union[str, Any] = mid + 1
        for j in range(snake_case_,snake_case_,-1 ):
            _A : List[str] = collection[j - 1]
        _A : int = val
    return collection

if __name__ == "__main__":
    _snake_case = input("Enter numbers separated by a comma:\n").strip()
    _snake_case = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
26
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
26
1
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )

def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
    # Base Case
    if curr_ind == len(snake_case_ ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0,len(snake_case_ ) ):
        if valid_connection(snake_case_,snake_case_,snake_case_,snake_case_ ):
            # Insert current vertex into path as next transition
            _A : Dict = next_ver
            # Validate created path
            if util_hamilton_cycle(snake_case_,snake_case_,curr_ind + 1 ):
                return True
            # Backtrack
            _A : Any = -1
    return False

def lowerCAmelCase_ ( snake_case_,snake_case_ = 0 ):
    _A : int = [-1] * (len(snake_case_ ) + 1)
    # initialize start and end of path with starting index
    _A : Dict = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(snake_case_,snake_case_,1 ) else []
26
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
26
1
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase ( UpperCamelCase__ ): _a = ["image_processor", "tokenizer"] _a = "CLIPImageProcessor" _a = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , _a=None , _a=None , **_a ) -> Dict: _A : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _a , ) _A : Union[str, Any] = kwargs.pop("""feature_extractor""" ) _A : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , **_a ) -> int: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _A : List[str] = self.tokenizer(_a , return_tensors=_a , **_a ) if images is not None: _A : Dict = self.image_processor(_a , return_tensors=_a , **_a ) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def a__ ( self , *_a , **_a ) -> List[Any]: return self.tokenizer.batch_decode(*_a , **_a ) def a__ ( self , *_a , **_a ) -> Any: return self.tokenizer.decode(*_a , **_a ) @property def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.tokenizer.model_input_names _A : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def a__ ( self ) -> List[str]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _a , ) return self.image_processor_class @property def a__ ( self ) -> List[Any]: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _a , ) return self.image_processor
26
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class lowercase ( UpperCamelCase__ ): _a = "fnet" def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Any = vocab_size _A : str = max_position_embeddings _A : Optional[Any] = hidden_size _A : List[str] = num_hidden_layers _A : List[str] = intermediate_size _A : List[Any] = hidden_act _A : List[str] = hidden_dropout_prob _A : List[str] = initializer_range _A : List[Any] = type_vocab_size _A : List[Any] = layer_norm_eps _A : List[str] = use_tpu_fourier_optimizations _A : str = tpu_short_seq_length
26
1
import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor _snake_case = logging.get_logger(__name__) class lowercase ( UpperCamelCase__ ): def __init__( self , *_a , **_a ) -> None: warnings.warn( """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DonutImageProcessor instead.""" , _a , ) super().__init__(*_a , **_a )
26
def lowerCAmelCase_ ( snake_case_ ):
    if n_term == "":
        return []
    _A : list = []
    for temp in range(int(snake_case_ ) ):
        series.append(f'''1/{temp + 1}''' if series else """1""" )
    return series

if __name__ == "__main__":
    _snake_case = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
26
1
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _snake_case = threading.Lock() _snake_case = None _snake_case = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _snake_case = logging.WARNING _snake_case = True def lowerCAmelCase_ ( ): _A : str = os.getenv("""TRANSFORMERS_VERBOSITY""",snake_case_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ", ".join(log_levels.keys() ) }''' ) return _default_log_level def lowerCAmelCase_ ( ): return __name__.split(""".""" )[0] def lowerCAmelCase_ ( ): return logging.getLogger(_get_library_name() ) def lowerCAmelCase_ ( ): global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _A : Union[str, Any] = logging.StreamHandler() # Set sys.stderr as stream. _A : Tuple = sys.stderr.flush # Apply our default configuration to the library root logger. _A : int = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) _A : List[str] = False def lowerCAmelCase_ ( ): global _default_handler with _lock: if not _default_handler: return _A : List[str] = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) _A : Tuple = None def lowerCAmelCase_ ( ): return log_levels def lowerCAmelCase_ ( snake_case_ = None ): if name is None: _A : Optional[int] = _get_library_name() _configure_library_root_logger() return logging.getLogger(snake_case_ ) def lowerCAmelCase_ ( ): _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def lowerCAmelCase_ ( snake_case_ ): _configure_library_root_logger() _get_library_root_logger().setLevel(snake_case_ ) def lowerCAmelCase_ ( ): return set_verbosity(snake_case_ ) def lowerCAmelCase_ ( ): return set_verbosity(snake_case_ ) def lowerCAmelCase_ ( ): return set_verbosity(snake_case_ ) def lowerCAmelCase_ ( ): return set_verbosity(snake_case_ ) def lowerCAmelCase_ ( ): _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def lowerCAmelCase_ ( ): _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def lowerCAmelCase_ ( snake_case_ ): _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(snake_case_ ) def lowerCAmelCase_ ( ): _configure_library_root_logger() _A : Union[str, Any] = False def lowerCAmelCase_ ( ): _configure_library_root_logger() _A : List[Any] = True def lowerCAmelCase_ ( ): _A : Union[str, Any] = _get_library_root_logger().handlers for handler in handlers: _A : str = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" ) 
handler.setFormatter(snake_case_ ) def lowerCAmelCase_ ( ): _A : Any = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(snake_case_ ) def lowerCAmelCase_ ( self,*snake_case_,**snake_case_ ): _A : str = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""",snake_case_ ) if no_advisory_warnings: return self.warning(*snake_case_,**snake_case_ ) _snake_case = warning_advice @functools.lru_cache(snake_case_ ) def lowerCAmelCase_ ( self,*snake_case_,**snake_case_ ): self.warning(*snake_case_,**snake_case_ ) _snake_case = warning_once class lowercase : def __init__( self , *_a , **_a ) -> List[Any]: # pylint: disable=unused-argument _A : List[str] = args[0] if args else None def __iter__( self ) -> Optional[Any]: return iter(self._iterator ) def __getattr__( self , _a ) -> Union[str, Any]: def empty_fn(*_a , **_a ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> Tuple: return self def __exit__( self , _a , _a , _a ) -> List[str]: return class lowercase : def __call__( self , *_a , **_a ) -> Union[str, Any]: if _tqdm_active: return tqdm_lib.tqdm(*_a , **_a ) else: return EmptyTqdm(*_a , **_a ) def a__ ( self , *_a , **_a ) -> Dict: _A : Optional[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_a , **_a ) def a__ ( self ) -> Union[str, Any]: if _tqdm_active: return tqdm_lib.tqdm.get_lock() _snake_case = _tqdm_cls() def lowerCAmelCase_ ( ): global _tqdm_active return bool(_tqdm_active ) def lowerCAmelCase_ ( ): global _tqdm_active _A : Optional[int] = True hf_hub_utils.enable_progress_bars() def lowerCAmelCase_ ( ): global _tqdm_active _A : List[str] = False hf_hub_utils.disable_progress_bars()
26
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
26
1
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
26
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
26
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = torch.device("cpu") def lowerCAmelCase_ ( ): _A : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _A : Optional[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im def lowerCAmelCase_ ( snake_case_ ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : str = dct.pop(snake_case_ ) _A : Optional[Any] = val def lowerCAmelCase_ ( snake_case_ ): _A : int = [] for k in state_dict.keys(): _A : Optional[Any] = k if ".pwconv" in k: _A : int = k_new.replace(""".pwconv""",""".point_wise_conv""" ) if ".dwconv" in k: _A : Dict = k_new.replace(""".dwconv""",""".depth_wise_conv""" ) if ".Proj." in k: _A : str = k_new.replace(""".Proj.""",""".proj.""" ) if "patch_embed" in k_new: _A : Optional[Any] = k_new.replace("""patch_embed""","""swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _A : Any = k_new.split(""".""" ) if ls[2].isdigit(): _A : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _A : Optional[Any] = k_new.replace("""network""","""swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _A : str = 1000 _A : List[str] = """huggingface/label-files""" _A : Union[str, Any] = """imagenet-1k-id2label.json""" _A : Tuple = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : int = {int(snake_case_ ): v for k, v in idalabel.items()} _A : Any = idalabel _A : List[str] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _A : List[Any] = [3, 3, 6, 4] _A : List[Any] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": _A : Dict = [3, 3, 9, 6] _A : Union[str, Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": _A : Optional[Any] = [4, 3, 10, 5] _A : Optional[Any] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": _A : Tuple = [4, 4, 12, 6] _A : List[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _A : Optional[Any] = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",check_hash=snake_case_ ) else: _A : List[Any] = torch.load(snake_case_,map_location="""cpu""" ) _A : Dict = checkpoint _A : Dict = create_rename_keys(snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: 
rename_key(snake_case_,snake_case_,snake_case_ ) # load HuggingFace model _A : str = SwiftFormerForImageClassification(snake_case_ ).eval() hf_model.load_state_dict(snake_case_ ) # prepare test inputs _A : Any = prepare_img() _A : Optional[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _A : Any = processor(images=snake_case_,return_tensors="""pt""" ) # compare outputs from both models _A : Union[str, Any] = get_expected_output(snake_case_ ) _A : List[Any] = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5],snake_case_,atol=1e-3 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") _snake_case = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
26
from __future__ import annotations

import numpy as np


def lowerCAmelCase_ ( snake_case_ ):
    # Element-wise rectified linear unit: max(0, x) for every entry of the input.
    return np.maximum(0,snake_case_ )


if __name__ == "__main__":
    print(np.array(lowerCAmelCase_([-1, 0, 5])))  # --> [0, 0, 5]
26
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_snake_case = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
26
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json", "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json", "uclanlp/visualbert-vqa-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json", "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json", "uclanlp/visualbert-vcr-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json", "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", "uclanlp/visualbert-nlvr2-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json" ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowercase ( UpperCamelCase__ ): _a = "visual_bert" def __init__( self , _a=3_0522 , _a=768 , _a=512 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=False , _a=True , _a=1 , _a=0 , _a=2 , **_a , ) -> Tuple: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : int = vocab_size _A : Dict = max_position_embeddings _A : Optional[Any] = hidden_size _A : List[Any] = visual_embedding_dim _A : Optional[Any] = num_hidden_layers _A : Tuple = num_attention_heads _A : str = intermediate_size _A : Dict = hidden_act _A : Union[str, Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Optional[int] = initializer_range _A : List[Any] = type_vocab_size _A : int = layer_norm_eps _A : Optional[int] = bypass_transformer _A : List[Any] = special_visual_initialize
26
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    @slow
    def a__ ( self ) -> Any:
        _A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        _A : List[Any] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !"
        _A : List[str] = model(_a )["""last_hidden_state"""]
        _A : Union[str, Any] = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , _a )
        # compare the actual values for a slice.
        _A : List[Any] = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
26
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"], "tokenization_deberta": ["DebertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["DebertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", "DebertaForTokenClassification", "DebertaModel", "DebertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", "TFDebertaForTokenClassification", "TFDebertaModel", "TFDebertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
26
1
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class lowercase ( UpperCamelCase__ ): _a = "time_series_transformer" _a = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 32 , _a = 32 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 64 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 100 , _a = 0.02 , _a=True , **_a , ) -> Union[str, Any]: # time series specific configuration _A : Any = prediction_length _A : List[Any] = context_length or prediction_length _A : Dict = distribution_output _A : List[str] = loss _A : Any = input_size _A : Optional[Any] = num_time_features _A : Optional[int] = lags_sequence _A : Optional[int] = scaling _A : Optional[Any] = num_dynamic_real_features _A : str = num_static_real_features _A : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) _A : List[str] = cardinality else: _A : int = [0] if embedding_dimension and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) _A : Tuple = embedding_dimension else: _A : Union[str, Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _A : List[Any] = num_parallel_samples # Transformer architecture configuration _A : Optional[Any] = input_size * len(_a ) + self._number_of_features _A : Optional[int] = d_model _A : List[str] = encoder_attention_heads _A : str = decoder_attention_heads _A : Any = encoder_ffn_dim _A : Dict = decoder_ffn_dim _A : int = encoder_layers _A : Dict = decoder_layers _A : int = dropout _A : Tuple = attention_dropout _A : List[str] = activation_dropout _A : Optional[int] = encoder_layerdrop _A : int = decoder_layerdrop _A : Optional[int] = activation_function _A : Dict = init_std _A : List[str] = use_cache super().__init__(is_encoder_decoder=_a , **_a ) @property def a__ ( self ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
26
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
26
1
from __future__ import annotations from typing import Generic, TypeVar _snake_case = TypeVar("T") class lowercase ( Generic[T] ): def __init__( self , _a ) -> None: _A : List[str] = data _A : int = self _A : Any = 0 class lowercase ( Generic[T] ): def __init__( self ) -> None: # map from node name to the node object _A : dict[T, DisjointSetTreeNode[T]] = {} def a__ ( self , _a ) -> None: # create a new set with x as its member _A : Union[str, Any] = DisjointSetTreeNode(_a ) def a__ ( self , _a ) -> DisjointSetTreeNode[T]: # find the set x belongs to (with path-compression) _A : Any = self.map[data] if elem_ref != elem_ref.parent: _A : Optional[int] = self.find_set(elem_ref.parent.data ) return elem_ref.parent def a__ ( self , _a , _a ) -> None: # helper function for union operation if nodea.rank > nodea.rank: _A : List[str] = nodea else: _A : str = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def a__ ( self , _a , _a ) -> None: # merge 2 disjoint sets self.link(self.find_set(_a ) , self.find_set(_a ) ) class lowercase ( Generic[T] ): def __init__( self ) -> None: # connections: map from the node to the neighbouring nodes (with weights) _A : dict[T, dict[T, int]] = {} def a__ ( self , _a ) -> None: # add a node ONLY if its not present in the graph if node not in self.connections: _A : Dict = {} def a__ ( self , _a , _a , _a ) -> None: # add an edge with the given weight self.add_node(_a ) self.add_node(_a ) _A : Optional[int] = weight _A : Tuple = weight def a__ ( self ) -> GraphUndirectedWeighted[T]: _A : Dict = [] _A : Dict = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda _a : x[2] ) # creating the disjoint set _A : Any = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(_a ) # MST generation _A : Optional[int] = 0 _A : List[Any] = 0 _A : Dict = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: _A , _A , _A : Tuple = edges[index] index += 1 _A : Optional[Any] = disjoint_set.find_set(_a ) _A : List[str] = disjoint_set.find_set(_a ) if parent_u != parent_v: num_edges += 1 graph.add_edge(_a , _a , _a ) disjoint_set.union(_a , _a ) return graph
26
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
26
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "gpt_bigcode" _a = ["past_key_values"] _a = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _a=5_0257 , _a=1024 , _a=768 , _a=12 , _a=12 , _a=None , _a="gelu_pytorch_tanh" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1e-5 , _a=0.02 , _a=True , _a=True , _a=5_0256 , _a=5_0256 , _a=True , _a=True , _a=True , **_a , ) -> Union[str, Any]: _A : List[str] = vocab_size _A : List[Any] = n_positions _A : Union[str, Any] = n_embd _A : Any = n_layer _A : Tuple = n_head _A : List[str] = n_inner _A : Optional[int] = activation_function _A : Optional[int] = resid_pdrop _A : List[str] = embd_pdrop _A : int = attn_pdrop _A : str = layer_norm_epsilon _A : int = initializer_range _A : List[str] = scale_attn_weights _A : Optional[int] = use_cache _A : Any = attention_softmax_in_fpaa _A : Dict = scale_attention_softmax_in_fpaa _A : Any = multi_query _A : Any = bos_token_id _A : Union[str, Any] = eos_token_id super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
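# A small, hypothetical usage sketch of the configuration class above with the
# Hugging Face transformers API (GPTBigCodeConfig / GPTBigCodeForCausalLM exist
# upstream; the tiny sizes below are arbitrary smoke-test values, not defaults).
from transformers import GPTBigCodeConfig, GPTBigCodeForCausalLM

config = GPTBigCodeConfig(
    vocab_size=1024,
    n_positions=128,
    n_embd=64,
    n_layer=2,
    n_head=4,
    multi_query=True,  # Santacoder/StarCoder-style multi-query attention
)
model = GPTBigCodeForCausalLM(config)
print(sum(p.numel() for p in model.parameters()))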
26
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowercase ( UpperCamelCase__,UpperCamelCase__ ): _a = 1 @register_to_config def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]: _A : Dict = None _A : List[Any] = None _A : Dict = None def a__ ( self , _a , _a = None ) -> Union[str, Any]: _A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a ) def a__ ( self , _a , _a , _a , _a=None ) -> Dict: if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score _A : Any = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) _A : List[str] = std.flatten() while len(std.shape ) < len(score.shape ): _A : List[Any] = std.unsqueeze(-1 ) _A : int = -score / std # compute _A : Tuple = -1.0 / len(self.timesteps ) _A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _A : List[str] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): _A : Union[str, Any] = beta_t.unsqueeze(-1 ) _A : Tuple = -0.5 * beta_t * x _A : Tuple = torch.sqrt(_a ) _A : Dict = drift - diffusion**2 * score _A : Dict = x + drift * dt # add noise _A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype ) _A : str = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ) -> Optional[Any]: return self.config.num_train_timesteps
26
1
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
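# Quick check of the conversion above (assuming the function names infix_2_postfix /
# infix_2_prefix used in the __main__ block): the table printing is only a side
# effect, the returned strings are the interesting part.
assert infix_2_postfix("a+b*c") == "abc*+"
assert infix_2_prefix("a+b*c") == "+a*bc"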
26
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
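# A minimal, hedged usage sketch of the fast FNet tokenizer defined above; the
# checkpoint name is the published google/fnet-base repo and requires a download.
from transformers import FNetTokenizerFast

tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
encoded = tokenizer("Hello world", "Second segment")
print(encoded["input_ids"])       # [CLS] ... [SEP] ... [SEP]
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second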
26
1
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
26
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate great-circle distance in metres between two (lat, lon) points
    via the haversine formula, using reduced latitudes from the WGS84 flattening."""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
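# Rough sanity check for the function above (coordinates are approximate):
# San Francisco to Yosemite Valley should come out on the order of 254 km.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
distance_m = haversine_distance(*SAN_FRANCISCO, *YOSEMITE)
print(f"{distance_m / 1000:.1f} km")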
26
1
import random def lowerCAmelCase_ ( snake_case_ ): _A : Optional[Any] = num - 1 _A : Optional[int] = 0 while s % 2 == 0: _A : str = s // 2 t += 1 for _ in range(5 ): _A : Tuple = random.randrange(2,num - 1 ) _A : Dict = pow(snake_case_,snake_case_,snake_case_ ) if v != 1: _A : int = 0 while v != (num - 1): if i == t - 1: return False else: _A : int = i + 1 _A : Dict = (v**2) % num return True def lowerCAmelCase_ ( snake_case_ ): if num < 2: return False _A : int = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(snake_case_ ) def lowerCAmelCase_ ( snake_case_ = 1024 ): while True: _A : List[Any] = random.randrange(2 ** (keysize - 1),2 ** (keysize) ) if is_prime_low_num(snake_case_ ): return num if __name__ == "__main__": _snake_case = generate_large_prime() print(("Prime number:", num)) print(("is_prime_low_num:", is_prime_low_num(num)))
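# Reference sketch (separate from the row above): a compact, self-contained version
# of the Miller-Rabin probabilistic primality test that the snippet above implements;
# helper names here are illustrative, not the obfuscated identifiers used above.
import random


def miller_rabin(n: int, rounds: int = 5) -> bool:
    """Return True if n is probably prime; composites are rejected with
    probability at least 1 - 4**(-rounds)."""
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    # Write n - 1 as 2**t * s with s odd.
    s, t = n - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, n - 1)
        v = pow(a, s, n)
        if v in (1, n - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % n
            if v == n - 1:
                break
        else:
            return False  # a witnesses that n is composite
    return True


print([x for x in range(2, 30) if miller_rabin(x)])
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] (with overwhelming probability)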
26
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
26
1
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _snake_case = logging.get_logger(__name__) class lowercase ( enum.Enum ): _a = 0 _a = 1 @add_end_docstrings(UpperCamelCase__ ) class lowercase ( UpperCamelCase__ ): _a = "generated" def __init__( self , *_a , **_a ) -> Optional[int]: super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> str: _A : Dict = {} if truncation is not None: _A : Union[str, Any] = truncation _A : Optional[Any] = generate_kwargs _A : List[str] = {} if return_tensors is not None and return_type is None: _A : Any = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: _A : Union[str, Any] = return_type if clean_up_tokenization_spaces is not None: _A : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: _A : str = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""" ) _A : Any = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a__ ( self , _a , _a , _a ) -> str: return True def a__ ( self , *_a , _a ) -> List[Any]: _A : Dict = self.model.config.prefix if self.model.config.prefix is not None else """""" if isinstance(args[0] , _a ): if self.tokenizer.pad_token_id is None: raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" ) _A : Optional[int] = ([prefix + arg for arg in args[0]],) _A : Optional[Any] = True elif isinstance(args[0] , _a ): _A : Any = (prefix + args[0],) _A : str = False else: raise ValueError( F''' `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`''' ) _A : Optional[Any] = self.tokenizer(*_a , padding=_a , truncation=_a , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self , *_a , **_a ) -> Optional[Any]: _A : int = super().__call__(*_a , **_a ) if ( isinstance(args[0] , _a ) and all(isinstance(_a , _a ) for el in args[0] ) and all(len(_a ) == 1 for res in result ) ): return [res[0] for res in result] return result def a__ ( self , _a , _a=TruncationStrategy.DO_NOT_TRUNCATE , **_a ) -> Tuple: _A : Optional[Any] = self._parse_and_tokenize(_a , truncation=_a , **_a ) return inputs def a__ ( self , _a , **_a ) -> Optional[int]: if self.framework == "pt": _A , _A : Tuple = model_inputs["""input_ids"""].shape elif self.framework == "tf": _A , _A : Dict = tf.shape(model_inputs["""input_ids"""] ).numpy() _A : List[Any] = generate_kwargs.get("""min_length""" , self.model.config.min_length ) _A : Union[str, Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length ) self.check_inputs(_a , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] ) _A : str = self.model.generate(**_a , **_a ) _A : Optional[int] = output_ids.shape[0] if self.framework == "pt": _A : Dict = output_ids.reshape(_a , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": _A : List[str] = tf.reshape(_a , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def a__ ( self , _a , _a=ReturnType.TEXT , _a=False ) -> List[Any]: _A : Union[str, Any] = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: _A : int = {F'''{self.return_name}_token_ids''': output_ids} elif return_type == ReturnType.TEXT: _A : Optional[int] = { F'''{self.return_name}_text''': self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) } records.append(_a ) return records @add_end_docstrings(UpperCamelCase__ ) class lowercase ( UpperCamelCase__ ): _a = "summary" def __call__( self , *_a , **_a ) -> str: return super().__call__(*_a , **_a ) def a__ ( self , _a , _a , _a ) -> bool: if max_length < min_length: logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' ) if input_length < max_length: logger.warning( F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ''' """a summarization task, where outputs shorter than the input are typically wanted, you might """ F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' ) @add_end_docstrings(UpperCamelCase__ ) class lowercase ( UpperCamelCase__ ): _a = "translation" def a__ ( self , _a , _a , _a ) -> Optional[int]: if input_length > 0.9 * max_length: logger.warning( F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ''' """increasing your max_length manually, e.g. 
translator('...', max_length=400)""" ) return True def a__ ( self , *_a , _a=TruncationStrategy.DO_NOT_TRUNCATE , _a=None , _a=None ) -> Optional[int]: if getattr(self.tokenizer , """_build_translation_inputs""" , _a ): return self.tokenizer._build_translation_inputs( *_a , return_tensors=self.framework , truncation=_a , src_lang=_a , tgt_lang=_a ) else: return super()._parse_and_tokenize(*_a , truncation=_a ) def a__ ( self , _a=None , _a=None , **_a ) -> List[Any]: _A , _A , _A : str = super()._sanitize_parameters(**_a ) if src_lang is not None: _A : Optional[Any] = src_lang if tgt_lang is not None: _A : Any = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. _A : Any = kwargs.get("""task""" , self.task ) _A : Union[str, Any] = task.split("""_""" ) if task and len(_a ) == 4: # translation, XX, to YY _A : int = items[1] _A : Dict = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self , *_a , **_a ) -> Union[str, Any]: return super().__call__(*_a , **_a )
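# A minimal usage sketch of the summarization / translation pipelines built on the
# classes above; the checkpoint names are common public examples, not defaults
# mandated by this file, and both require a model download.
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
print(summarizer("A very long article body goes here ...", max_length=60, min_length=10))

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?", max_length=40))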
26
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
26
1
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase ( UpperCamelCase__ ): def __init__( self , *_a , _a=None , _a=None , **_a ) -> Dict: super().__init__(*_a , **_a ) _A : Optional[int] = eval_examples _A : Tuple = post_process_function def a__ ( self , _a = None , _a=None , _a = None , _a = "eval" , **_a , ) -> Dict[str, float]: _A : Any = gen_kwargs.copy() _A : Tuple = ( gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length ) _A : Tuple = ( gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams ) _A : List[str] = gen_kwargs _A : Dict = self.eval_dataset if eval_dataset is None else eval_dataset _A : Optional[int] = self.get_eval_dataloader(_a ) _A : Any = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A : List[str] = self.compute_metrics _A : Tuple = None _A : str = time.time() _A : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A : Optional[int] = eval_loop( _a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , ) finally: _A : List[str] = compute_metrics _A : Tuple = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A : Tuple = self.post_process_function(_a , _a , _a ) _A : Optional[int] = self.compute_metrics(_a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A : int = metrics.pop(_a ) metrics.update(output.metrics ) else: _A : Union[str, Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_a ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a ) return metrics def a__ ( self , _a , _a , _a=None , _a = "test" , **_a ) -> Tuple: _A : List[str] = gen_kwargs.copy() _A : Optional[Any] = self.get_test_dataloader(_a ) # Temporarily disable metric computation, we will do it in the loop here. 
_A : List[Any] = self.compute_metrics _A : int = None _A : Union[str, Any] = time.time() _A : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A : Dict = eval_loop( _a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , ) finally: _A : int = compute_metrics _A : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A : Union[str, Any] = self.post_process_function(_a , _a , _a , """predict""" ) _A : Tuple = self.compute_metrics(_a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A : Optional[Any] = metrics.pop(_a ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
26
def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) if n == 0: return 0 _A : Tuple = float("""-inf""" ) for i in range(1,n + 1 ): _A : str = max( snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) ) return max_revue def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) _A : Dict = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _A : List[str] = float("""-inf""" ) for i in range(1,n + 1 ): _A : Optional[Any] = max( snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),) _A : Tuple = max_revenue return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _A : Any = 0 for i in range(1,n + 1 ): _A : Optional[Any] = max_rev[i] for j in range(1,i + 1 ): _A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] ) _A : int = max_revenue_i return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): if n < 0: _A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(snake_case_ ) if n > len(snake_case_ ): _A : Any = ( """Each integral piece of rod must have a corresponding price. """ f'''Got n = {n} but length of prices = {len(snake_case_ )}''' ) raise ValueError(snake_case_ ) def lowerCAmelCase_ ( ): _A : Tuple = [6, 10, 12, 15, 20, 23] _A : List[Any] = len(snake_case_ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _A : Any = 36 _A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ ) _A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ ) _A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
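# Reference sketch: a self-contained bottom-up version of the same rod-cutting DP
# with readable names (illustrative names, not the obfuscated identifiers above).
def bottom_up_cut_rod_example(prices: list[int], n: int) -> int:
    """Maximum revenue for a rod of length n, where prices[i] is the price of a
    piece of length i + 1."""
    max_rev = [0] * (n + 1)
    for length in range(1, n + 1):
        best = float("-inf")
        for first_cut in range(1, length + 1):
            best = max(best, prices[first_cut - 1] + max_rev[length - first_cut])
        max_rev[length] = best
    return max_rev[n]


print(bottom_up_cut_rod_example([6, 10, 12, 15, 20, 23], 6))  # -> 36 (six pieces of length 1)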
26
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_=False ): _A : List[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=False ): for i in range(config.num_hidden_layers ): if base_model: _A : Tuple = """""" else: _A : Any = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A : Tuple = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _A : str = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] _A : Dict = in_proj_bias[: config.hidden_size] _A : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A : str = in_proj_weight[ -config.hidden_size :, : ] _A : int = in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( snake_case_ ): _A : Optional[int] = ["""head.weight""", """head.bias"""] for k in ignore_keys: 
state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = dct.pop(snake_case_ ) _A : str = val def lowerCAmelCase_ ( ): _A : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=True ): _A : List[str] = ViTConfig() # patch_size if model_name[-1] == "8": _A : Optional[Any] = 8 # set labels if required if not base_model: _A : Any = 1000 _A : Any = """huggingface/label-files""" _A : str = """imagenet-1k-id2label.json""" _A : Any = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : List[str] = {int(snake_case_ ): v for k, v in idalabel.items()} _A : List[str] = idalabel _A : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: _A : Any = 384 _A : List[str] = 1536 _A : List[Any] = 12 _A : Optional[int] = 6 # load original model from torch hub _A : str = torch.hub.load("""facebookresearch/dino:main""",snake_case_ ) original_model.eval() # load state_dict of original model, remove and rename some keys _A : int = original_model.state_dict() if base_model: remove_classification_head_(snake_case_ ) _A : str = create_rename_keys(snake_case_,base_model=snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) read_in_q_k_v(snake_case_,snake_case_,snake_case_ ) # load HuggingFace model if base_model: _A : List[str] = ViTModel(snake_case_,add_pooling_layer=snake_case_ ).eval() else: _A : Tuple = ViTForImageClassification(snake_case_ ).eval() model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by ViTImageProcessor _A : List[str] = ViTImageProcessor() _A : Dict = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[int] = encoding["""pixel_values"""] _A : Optional[int] = model(snake_case_ ) if base_model: _A : Union[str, Any] = original_model(snake_case_ ) assert torch.allclose(snake_case_,outputs.last_hidden_state[:, 0, :],atol=1e-1 ) else: _A : Any = original_model(snake_case_ ) assert logits.shape == outputs.logits.shape assert torch.allclose(snake_case_,outputs.logits,atol=1e-3 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dino_vitb16", type=str, help="Name of the model trained with DINO you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--base_model", action="store_true", help="Whether to only convert the base model (no projection head weights).", ) parser.set_defaults(base_model=True) _snake_case = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
26
import requests
from bs4 import BeautifulSoup  # fixed import: the package is bs4, not bsa


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
26
1
import torch from diffusers import DiffusionPipeline class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a ) -> Optional[Any]: super().__init__() self.register_modules(unet=_a , scheduler=_a ) def __call__( self ) -> Dict: _A : str = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) _A : Any = 1 _A : List[Any] = self.unet(_a , _a ).sample _A : Dict = self.scheduler.step(_a , _a , _a ).prev_sample _A : int = scheduler_output - scheduler_output + torch.ones_like(_a ) return result
26
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
26
1
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
26
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
26
1
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowercase : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> List[str]: _A : Optional[Any] = parent _A : int = batch_size _A : List[Any] = seq_length _A : List[Any] = is_training _A : str = use_input_mask _A : List[Any] = use_token_type_ids _A : Any = use_labels _A : Dict = vocab_size _A : List[str] = hidden_size _A : Optional[int] = num_hidden_layers _A : List[str] = num_attention_heads _A : Tuple = intermediate_size _A : Optional[int] = hidden_act _A : Optional[int] = hidden_dropout_prob _A : Any = attention_probs_dropout_prob _A : str = max_position_embeddings _A : Any = type_vocab_size _A : Optional[Any] = type_sequence_label_size _A : Optional[Any] = initializer_range _A : Any = num_labels _A : int = num_choices _A : Any = scope def a__ ( self ) -> List[Any]: _A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A : Union[str, Any] = None if self.use_input_mask: _A : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _A : Tuple = None if self.use_token_type_ids: _A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A : Dict = None _A : List[Any] = None _A : Dict = None if self.use_labels: _A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A : List[str] = ids_tensor([self.batch_size] , self.num_choices ) _A : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self ) -> List[Any]: return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _A : Optional[int] = LlamaModel(config=_a ) model.to(_a ) model.eval() _A : int = model(_a , attention_mask=_a ) _A : str = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]: _A : Union[str, Any] = True _A : Any = LlamaModel(_a ) model.to(_a ) model.eval() _A : int = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) _A : Optional[int] = model( _a , attention_mask=_a , encoder_hidden_states=_a , ) _A : Optional[int] = model(_a , 
attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int: _A : List[Any] = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() _A : Optional[Any] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Any: _A : Optional[Any] = True _A : Tuple = True _A : str = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass _A : Union[str, Any] = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , ) _A : str = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _A : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) _A : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 ) _A : Dict = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0] _A : Optional[int] = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0] # select random slice _A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _A : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) ) def a__ ( self ) -> int: _A : List[Any] = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : Any = config_and_inputs _A : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _a = (LlamaForCausalLM,) if is_torch_available() else () _a = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _a = False _a = False def a__ ( self ) -> List[str]: _A : Union[str, Any] = LlamaModelTester(self ) _A : Dict = ConfigTester(self , config_class=_a , hidden_size=37 ) def a__ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self ) -> Union[str, Any]: _A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _A : Optional[Any] = type self.model_tester.create_and_check_model(*_a ) def a__ ( self ) -> List[Any]: _A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common() _A : List[str] = 3 _A : Optional[Any] = input_dict["""input_ids"""] _A : Tuple = input_ids.ne(1 ).to(_a ) _A : Any = ids_tensor([self.model_tester.batch_size] 
, self.model_tester.type_sequence_label_size ) _A : Tuple = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() _A : int = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a__ ( self ) -> Tuple: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _A : int = 3 _A : Union[str, Any] = """single_label_classification""" _A : Union[str, Any] = input_dict["""input_ids"""] _A : int = input_ids.ne(1 ).to(_a ) _A : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _A : Optional[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() _A : Union[str, Any] = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a__ ( self ) -> str: _A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _A : List[str] = 3 _A : Tuple = """multi_label_classification""" _A : List[str] = input_dict["""input_ids"""] _A : Union[str, Any] = input_ids.ne(1 ).to(_a ) _A : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _A : Optional[int] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() _A : Tuple = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def a__ ( self ) -> Dict: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def a__ ( self , _a ) -> Any: _A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common() _A : Dict = ids_tensor([1, 10] , config.vocab_size ) _A : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _A : str = LlamaModel(_a ) original_model.to(_a ) original_model.eval() _A : Dict = original_model(_a ).last_hidden_state _A : Union[str, Any] = original_model(_a ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _A : Optional[int] = {"""type""": scaling_type, """factor""": 10.0} _A : Optional[Any] = LlamaModel(_a ) scaled_model.to(_a ) scaled_model.eval() _A : Tuple = scaled_model(_a ).last_hidden_state _A : Any = scaled_model(_a ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) ) @require_torch class lowercase ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def a__ ( self ) -> Dict: _A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338] _A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) _A : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 _A : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _A : Any = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def a__ ( self ) -> Any: _A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338] _A : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) _A : Dict = model(torch.tensor(_a ) ) # Expected mean on dim = -1 _A : Dict = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _A : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def a__ ( self ) -> Optional[Any]: _A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338] _A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) _A : Optional[int] = model(torch.tensor(_a ) ) # Expected mean on dim = -1 _A : Tuple = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _A : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test""" ) @slow def a__ ( self ) -> Optional[Any]: _A : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338] _A : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) _A : int = model(torch.tensor(_a ) ) _A : Tuple = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 ) # fmt: off _A : Any = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Model is curently gated""" ) @slow def a__ ( self ) -> Any: _A : Any = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" _A : List[Any] = """Simply put, the theory of relativity states that """ _A : Dict = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) _A : List[str] = tokenizer.encode(_a , return_tensors="""pt""" ) _A : Optional[Any] = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=_a ) # greedy generation outputs _A : Optional[Any] = model.generate(_a , max_new_tokens=64 , top_p=_a , temperature=1 , do_sample=_a ) _A : Union[str, Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_a ) self.assertEqual(_a , _a )
26
def lowerCAmelCase_ ( number ):
    if number < 0:
        raise ValueError("""number must not be negative""" )
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
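# Illustrative usage sketch (added for clarity, not part of the original file): the helper
# above is the classic "n & (n - 1)" power-of-two check for non-negative integers.
assert lowerCAmelCase_(1)         # 2**0
assert lowerCAmelCase_(16)        # 2**4
assert not lowerCAmelCase_(6)     # 0b110 is not a power of two
assert lowerCAmelCase_(0)         # edge case: 0 & -1 == 0, so 0 passes this check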
26
1
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
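# Illustrative usage sketch of the auto class defined above (exposed as AutoFeatureExtractor
# in the upstream transformers library); the checkpoint name is just a common public example.
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(feature_extractor).__name__)  # resolves to Wav2Vec2FeatureExtractor via the mapping above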
26
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
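# Illustrative invocation sketch for the conversion script above (the script filename and the
# local paths are hypothetical placeholders; the flags match the argparse definition above):
#
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json
#
# --src_path is the ParlAI checkpoint, --save_dir receives the converted Hugging Face model,
# and --hf_config_json points at the BlenderbotConfig to instantiate.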
26
1
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: # A mock response for an HTTP head request to emulate server down _A : Optional[int] = mock.Mock() _A : Optional[Any] = 500 _A : Dict = {} _A : Union[str, Any] = HTTPError _A : List[Any] = {} # Download this model to make sure it's in the cache. _A : int = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head: _A : List[Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def a__ ( self ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down _A : str = mock.Mock() _A : Any = 500 _A : Optional[int] = {} _A : List[str] = HTTPError _A : int = {} # Download this model to make sure it's in the cache. _A : int = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head: _A : str = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def a__ ( self ) -> Optional[int]: # This test is for deprecated behavior and can be removed in v5 try: _A : Tuple = tempfile.mktemp() with open(_a , """wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , _a ) _A : str = AlbertTokenizer.from_pretrained(_a ) finally: os.remove(_a ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" , """wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , _a ) _A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("""tokenizer.json""" ) def a__ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 _A : Dict = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class lowercase ( unittest.TestCase ): _a = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def a__ ( cls ) -> Tuple: _A : int = TOKEN HfFolder.save_token(_a ) @classmethod def a__ ( cls ) -> List[Any]: try: delete_repo(token=cls._token , repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def a__ ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : Optional[Any] = BertTokenizer(_a ) tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token ) _A : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a , repo_id="""test-tokenizer""" , push_to_hub=_a , use_auth_token=self._token ) _A : List[str] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def a__ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[int] = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : Tuple = BertTokenizer(_a ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token ) _A : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _a , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=_a , use_auth_token=self._token ) _A : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def a__ ( self ) -> str: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : str = CustomTokenizer(_a ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) _A : Optional[int] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) # Fast and slow custom tokenizer 
CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _A : Any = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : Dict = BertTokenizerFast.from_pretrained(_a ) bert_tokenizer.save_pretrained(_a ) _A : Dict = CustomTokenizerFast.from_pretrained(_a ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) _A : Tuple = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" ) _A : Tuple = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=_a , trust_remote_code=_a ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) class lowercase ( unittest.TestCase ): def a__ ( self ) -> List[Any]: _A : Optional[Any] = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def a__ ( self ) -> Union[str, Any]: _A : List[Any] = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] ) def a__ ( self ) -> Dict: _A : List[Any] = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] ) def a__ ( self ) -> Dict: _A : Dict = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def a__ ( self ) -> List[Any]: _A : Optional[Any] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def a__ ( self ) -> List[Any]: _A : Dict = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] ) def a__ ( self ) -> int: _A : Any = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] ) def a__ ( self ) -> List[Any]: # Even if the offsets are wrong, we necessarily output correct string # parts. _A : Tuple = Trie() _A : int = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(_a , ["""AB""", """C"""] )
26
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
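# Illustrative usage sketch of the public API these reader/writer classes back in the
# `datasets` library (Dataset.from_sql / Dataset.to_sql); the sqlite URI is a hypothetical
# placeholder and sqlalchemy must be installed for URI-style connections.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_sql("my_table", "sqlite:///example.db")                      # routed through the writer class above
round_trip = Dataset.from_sql("my_table", "sqlite:///example.db")  # routed through the reader class above
print(round_trip.num_rows)  # 2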
26
1
import os import pytest from transformers.dynamic_module_utils import get_imports _snake_case = "\nimport os\n" _snake_case = "\ndef foo():\n import os\n return False\n" _snake_case = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n" _snake_case = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n" _snake_case = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n" _snake_case = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n" _snake_case = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n" _snake_case = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n" _snake_case = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n" _snake_case = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n" _snake_case = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize("""case""",snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Tuple = os.path.join(snake_case_,"""test_file.py""" ) with open(snake_case_,"""w""" ) as _tmp_file: _tmp_file.write(snake_case_ ) _A : Optional[Any] = get_imports(snake_case_ ) assert parsed_imports == ["os"]
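# Illustrative sketch of the behaviour exercised above (assuming the same transformers dev
# environment as the test): get_imports returns the top-level modules a file imports while
# ignoring imports guarded by try/except ImportError blocks. The temporary path below is a
# hypothetical placeholder.
from transformers.dynamic_module_utils import get_imports

example_source = "import os\nimport json\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
with open("/tmp/example_module.py", "w") as f:
    f.write(example_source)
print(get_imports("/tmp/example_module.py"))  # expected (order may vary): ['os', 'json'] -- the guarded bar import is dropped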
26
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_snake_case = logging.get_logger(__name__)

_snake_case = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class lowercase ( UpperCamelCase__ ):
    _a = "fnet"

    def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int:
        super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
        _A : Any = vocab_size
        _A : str = max_position_embeddings
        _A : Optional[Any] = hidden_size
        _A : List[str] = num_hidden_layers
        _A : List[str] = intermediate_size
        _A : List[Any] = hidden_act
        _A : List[str] = hidden_dropout_prob
        _A : List[str] = initializer_range
        _A : List[Any] = type_vocab_size
        _A : List[Any] = layer_norm_eps
        _A : List[str] = use_tpu_fourier_optimizations
        _A : str = tpu_short_seq_length
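# Illustrative sketch (added for clarity): in the upstream transformers library this
# configuration class is FNetConfig with model_type "fnet"; a config with non-default sizes
# can be built like this, assuming a transformers install with FNet support.
from transformers import FNetConfig

config = FNetConfig(vocab_size=32_000, hidden_size=768, num_hidden_layers=12)
print(config.model_type)  # "fnet"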
26
1
def combination_util( arr, n, r, index, data, i ):
    if index == r:
        for j in range(r ):
            print(data[j],end=""" """ )
        print(""" """ )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr,n,r,index + 1,data,i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr,n,r,index,data,i + 1 )


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr, n, r ):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr,n,r,0,data,0 )


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
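# Worked example (added for clarity): the recursion above emits combinations in index order,
# so a call such as
#
#     print_combination([1, 2, 3], 3, 2)
#
# prints "1 2", "1 3" and "2 3", one combination per line (with trailing spaces).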
26
def harmonic_series( n_term ):
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(f'''1/{temp + 1}''' if series else """1""" )
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
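# Quick sanity check (added for clarity): the series is built as strings, so the first five
# terms come back as fractions and the empty-input guard returns an empty list.
assert harmonic_series(5) == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []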
26
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Optional[int]: _A : Optional[int] = size if size is not None else {"""shortest_edge""": 18} _A : Optional[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _A : Tuple = parent _A : int = batch_size _A : int = num_channels _A : int = image_size _A : Optional[Any] = min_resolution _A : Any = max_resolution _A : Any = do_resize _A : Any = size _A : Optional[Any] = do_center_crop _A : str = crop_size _A : Union[str, Any] = do_normalize _A : Any = image_mean _A : Dict = image_std def a__ ( self ) -> List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = LevitImageProcessor if is_vision_available() else None def a__ ( self ) -> List[Any]: _A : Optional[Any] = LevitImageProcessingTester(self ) @property def a__ ( self ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Tuple: _A : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """do_center_crop""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) def a__ ( self ) -> Optional[Any]: _A : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _A : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> Tuple: # Initialize image_processing _A : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _A : Union[str, Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def a__ ( self ) -> str: # Initialize image_processing _A : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _A : Tuple = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
26
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
26
1
from math import isclose, sqrt


def next_point( point_x, point_y, incoming_gradient ):
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution( first_x_coord = 1.4, first_y_coord = -9.6 ):
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient )
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"""{solution() = }""")
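# Note on the reflection step in next_point above (added for clarity): for the ellipse
# 4x^2 + y^2 = 100 the slope of the normal at (x, y) is y / (4x). Writing that slope as
# tan(t), the helper values are sa = sin(2t) = 2*tan(t) / (1 + tan(t)^2) and
# ca = cos(2t) = (1 - tan(t)^2) / (1 + tan(t)^2), so the outgoing slope
#     m_out = (sa - ca * m_in) / (ca + sa * m_in) = tan(2t - atan(m_in))
# is the incoming slope reflected about the normal. The quadratic then finds where the
# outgoing ray meets the ellipse again, which is why the root equal to the current point
# is discarded.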
26
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
26
1
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer _snake_case = "bart" _snake_case = True @st.cache(allow_output_mutation=snake_case_ ) def lowerCAmelCase_ ( ): if LOAD_DENSE_INDEX: _A : Optional[int] = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" ) _A : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" ) _A : str = qar_model.eval() else: _A , _A : Dict = (None, None) if MODEL_TYPE == "bart": _A : Tuple = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" ) _A : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" ) _A : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" ) sas_model.load_state_dict(save_dict["""model"""] ) _A : Tuple = sas_model.eval() else: _A , _A : Optional[int] = make_qa_sas_model( model_name="""t5-small""",from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""",device="""cuda:0""" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=snake_case_ ) def lowerCAmelCase_ ( ): if LOAD_DENSE_INDEX: _A : int = faiss.StandardGpuResources() _A : int = datasets.load_dataset(path="""wiki_snippets""",name="""wiki40b_en_100_0""" )["""train"""] _A : List[str] = np.memmap( """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""",dtype="""float32""",mode="""r""",shape=(wikiaab_passages.num_rows, 128),) _A : Any = faiss.IndexFlatIP(128 ) _A : Union[str, Any] = faiss.index_cpu_to_gpu(snake_case_,1,snake_case_ ) wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU else: _A , _A : List[str] = (None, None) _A : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=snake_case_ ) def lowerCAmelCase_ ( ): _A : Tuple = datasets.load_dataset("""eli5""",name="""LFQA_reddit""" ) _A : Optional[int] = elia["""train_eli5"""] _A : int = np.memmap( """eli5_questions_reps.dat""",dtype="""float32""",mode="""r""",shape=(elia_train.num_rows, 128) ) _A : int = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(snake_case_ ) return (elia_train, eli5_train_q_index) _snake_case , _snake_case , _snake_case = load_indexes() _snake_case , _snake_case , _snake_case , _snake_case = load_models() _snake_case , _snake_case = load_train_data() def lowerCAmelCase_ ( snake_case_,snake_case_=10 ): _A : Union[str, Any] = embed_questions_for_retrieval([question],snake_case_,snake_case_ ) _A , _A : Optional[Any] = eli5_train_q_index.search(snake_case_,snake_case_ ) _A : int = [elia_train[int(snake_case_ )] for i in I[0]] return nn_examples def lowerCAmelCase_ ( snake_case_,snake_case_="wiki40b",snake_case_="dense",snake_case_=10 ): if source == "none": _A , _A : List[str] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), []) else: if method == "dense": _A , _A : int = query_qa_dense_index( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ) else: _A , _A : Union[str, Any] = query_es_index( snake_case_,snake_case_,index_name="""english_wiki40b_snippets_100w""",n_results=snake_case_,) _A : Tuple = [ (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res 
in hit_lst ] _A : Dict = """question: {} context: {}""".format(snake_case_,snake_case_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda snake_case_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None), } ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=64,snake_case_=256,snake_case_=False,snake_case_=2,snake_case_=0.95,snake_case_=0.8 ): with torch.no_grad(): _A : Union[str, Any] = qa_sas_generate( snake_case_,snake_case_,snake_case_,num_answers=1,num_beams=snake_case_,min_len=snake_case_,max_len=snake_case_,do_sample=snake_case_,temp=snake_case_,top_p=snake_case_,top_k=snake_case_,max_input_length=1024,device="""cuda:0""",)[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar _snake_case = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" _snake_case = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia _snake_case = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) _snake_case = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] _snake_case = st.sidebar.checkbox("Demo options") if demo_options: _snake_case = st.sidebar.selectbox( "", action_list, index=3, ) _snake_case = action_list.index(action_st) _snake_case = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) _snake_case = show_type == "Show full text of passages" else: _snake_case = 3 _snake_case = True _snake_case = st.sidebar.checkbox("Retrieval options") if retrieval_options: _snake_case = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) _snake_case = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) _snake_case = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: _snake_case = "wiki40b" _snake_case = "dense" _snake_case = "beam" _snake_case = 2 _snake_case = 64 _snake_case = 256 _snake_case = None _snake_case = None _snake_case = st.sidebar.checkbox("Generation options") if generate_options: _snake_case = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. 
You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) _snake_case = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) _snake_case = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) _snake_case = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": _snake_case = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: _snake_case = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) _snake_case = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) _snake_case = None # start main text _snake_case = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] _snake_case = st.selectbox( "What would you like to ask? 
---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": _snake_case = st.text_input("Enter your question here:", "") else: _snake_case = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": _snake_case , _snake_case = make_support(question, source=wiki_source, method="dense", n_results=10) _snake_case , _snake_case = make_support(question, source=wiki_source, method="sparse", n_results=10) _snake_case = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] _snake_case = support_list[:10] _snake_case = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: _snake_case , _snake_case = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: _snake_case , _snake_case = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): _snake_case = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) _snake_case = res[1].strip() if sec_titles == "": _snake_case = "[{}]({})".format(res[0], wiki_url) else: _snake_case = sec_titles.split(" & ") _snake_case = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: _snake_case = find_nearest_training(question) _snake_case = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) _snake_case = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) _snake_case = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
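The dense retriever above relies on FAISS max-inner-product search over precomputed passage embeddings. A minimal standalone sketch of that lookup, using random vectors in place of the real 128-dimensional Wiki40b passage matrix (all values here are illustrative only):

import numpy as np
import faiss

dim = 128                                                     # matches the embedding width used above
passage_reps = np.random.rand(1000, dim).astype("float32")    # stand-in for the memmapped passage matrix
question_rep = np.random.rand(1, dim).astype("float32")       # stand-in for the embedded question

index = faiss.IndexFlatIP(dim)                                # exact maximum-inner-product index
index.add(passage_reps)
scores, ids = index.search(question_rep, 10)                  # top-10 passages by inner product
print(ids[0], scores[0])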
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    # element-wise max(0, x); accepts lists or numpy arrays
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
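A small extra check (values chosen only for illustration) showing that the same np.maximum-based ReLU applies element-wise to arrays of any shape:

import numpy as np

x = np.array([[-2.0, 0.5], [3.0, -0.1]])
print(np.maximum(0, x))  # [[0.  0.5] [3.  0. ]]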
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowercase : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=64 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[int]: _A : Optional[int] = parent _A : Optional[Any] = batch_size _A : Tuple = seq_length _A : Optional[Any] = is_training _A : List[Any] = use_input_mask _A : Any = use_token_type_ids _A : List[Any] = use_labels _A : Optional[int] = vocab_size _A : str = hidden_size _A : List[str] = embedding_size _A : List[str] = num_hidden_layers _A : List[str] = num_attention_heads _A : Dict = intermediate_size _A : Dict = hidden_act _A : Any = hidden_dropout_prob _A : List[Any] = attention_probs_dropout_prob _A : str = max_position_embeddings _A : List[str] = type_vocab_size _A : int = type_sequence_label_size _A : Optional[Any] = initializer_range _A : Union[str, Any] = num_labels _A : Any = num_choices _A : List[str] = scope def a__ ( self ) -> Union[str, Any]: _A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A : List[str] = None if self.use_input_mask: _A : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) _A : Union[str, Any] = None if self.use_token_type_ids: _A : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A : Any = None _A : List[Any] = None _A : Optional[Any] = None if self.use_labels: _A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A : List[str] = ids_tensor([self.batch_size] , self.num_choices ) _A : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self ) -> Optional[int]: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any: _A : Tuple = MegatronBertModel(config=_a ) model.to(_a ) model.eval() _A : str = model(_a , attention_mask=_a , token_type_ids=_a ) _A : Any = model(_a , token_type_ids=_a ) _A : int = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: _A : List[Any] = MegatronBertForMaskedLM(config=_a ) model.to(_a ) model.eval() _A : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]: _A : Optional[int] = MegatronBertForCausalLM(config=_a ) model.to(_a ) model.eval() _A : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]: _A : List[str] = MegatronBertForNextSentencePrediction(config=_a ) model.to(_a ) model.eval() _A : List[str] = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: _A : Dict = MegatronBertForPreTraining(config=_a ) model.to(_a ) model.eval() _A : str = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , next_sentence_label=_a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: _A : Dict = MegatronBertForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _A : Dict = model( _a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple: _A : Tuple = self.num_labels _A : Any = MegatronBertForSequenceClassification(_a ) model.to(_a ) model.eval() _A : str = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> int: _A : Union[str, Any] = self.num_labels _A : Optional[int] = MegatronBertForTokenClassification(config=_a ) model.to(_a ) model.eval() _A : Dict = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]: _A : str = self.num_choices _A : Optional[Any] = MegatronBertForMultipleChoice(config=_a ) model.to(_a ) model.eval() _A : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : Optional[int] = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self ) -> int: _A : Optional[int] = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : int = config_and_inputs _A : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": 
token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) _a = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) _a = True # test_resize_embeddings = False _a = False def a__ ( self , _a , _a , _a=False ) -> Optional[int]: _A : List[str] = super()._prepare_for_class(_a , _a , return_labels=_a ) if return_labels: if model_class in get_values(_a ): _A : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a ) _A : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_a ) return inputs_dict def a__ ( self ) -> Dict: _A : Dict = MegatronBertModelTester(self ) _A : str = ConfigTester(self , config_class=_a , hidden_size=37 ) def a__ ( self ) -> str: self.config_tester.run_common_tests() def a__ ( self ) -> Optional[int]: _A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*_a ) def a__ ( self ) -> List[Any]: _A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_a ) def a__ ( self ) -> Dict: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_a ) def a__ ( self ) -> int: _A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_a ) def a__ ( self ) -> List[str]: _A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*_a ) def a__ ( self ) -> int: _A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*_a ) def a__ ( self ) -> Dict: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_a ) def a__ ( self ) -> int: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*_a ) def lowerCAmelCase_ ( snake_case_ ): return torch.tensor( snake_case_,dtype=torch.long,device=snake_case_,) _snake_case = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): @slow @unittest.skip("""Model is not available.""" ) def a__ ( self ) -> List[Any]: _A : Optional[Any] = """nvidia/megatron-bert-uncased-345m""" if "MYDIR" in os.environ: _A : List[str] = os.path.join(os.environ["""MYDIR"""] , _a ) _A : Any = MegatronBertModel.from_pretrained(_a ) model.to(_a ) model.half() _A : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): _A : Dict = model(_a )[0] _A : 
List[str] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , _a ) _A : Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): _A : Any = output[0, ii, jj] _A : Optional[int] = expected[3 * ii + jj] _A : Tuple = """ii={} jj={} a={} b={}""".format(_a , _a , _a , _a ) self.assertTrue(math.isclose(_a , _a , rel_tol=_a , abs_tol=_a ) , msg=_a )
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
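A minimal single-process sketch of the generate-and-decode core of the evaluation loop above, using the upstream class name AutoModelForSeq2SeqLM and the script's default checkpoint; the input text is illustrative, and the real script shards data across distributed ranks:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("sshleifer/distilbart-xsum-12-3")        # same default as --model_name
model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-xsum-12-3")

batch = tok(["A very long article to summarize ..."], return_tensors="pt", truncation=True)
summaries = model.generate(
    input_ids=batch["input_ids"],
    attention_mask=batch["attention_mask"],
    num_beams=4,
)
print(tok.batch_decode(summaries, skip_special_tokens=True))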
def euclidean_gcd(a, b):
    # iterative Euclidean algorithm: gcd(a, b) = gcd(b, a % b)
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a, b):
    # recursive form of the same recurrence
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}''')
    print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}''')
    print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}''')
    print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}''')
    print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}''')
    print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}''')
    print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}''')
    print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}''')
    print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}''')
    print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}''')


if __name__ == "__main__":
    main()
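A short worked example of the recurrence gcd(a, b) = gcd(b, a mod b), cross-checked against the standard library (the numbers are chosen only for illustration):

import math

a, b = 48, 36
while b:                  # 48,36 -> 36,12 -> 12,0
    a, b = b, a % b
print(a)                  # 12
print(math.gcd(48, 36))   # 12, same result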
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): @slow def a__ ( self ) -> Any: _A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) _A : List[Any] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _A : List[str] = model(_a )["""last_hidden_state"""] _A : Union[str, Any] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. _A : List[Any] = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." 
) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) _snake_case = parser.parse_args() _snake_case = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
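The guidance step above combines the unconditional and image-conditioned predictions. A tiny sketch of that classifier-free guidance formula on dummy tensors (shapes and values are illustrative only):

import torch

noise_pred_uncond = torch.zeros(1, 4)
noise_pred_cond = torch.ones(1, 4)
guidance_scale = 4.0

guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(guided)  # tensor([[4., 4., 4., 4.]])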
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json", "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json", "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "owlvit_text_model" def __init__( self , _a=4_9408 , _a=512 , _a=2048 , _a=12 , _a=8 , _a=16 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , _a=0 , _a=4_9406 , _a=4_9407 , **_a , ) -> int: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Dict = vocab_size _A : List[Any] = hidden_size _A : Optional[int] = intermediate_size _A : Optional[Any] = num_hidden_layers _A : List[Any] = num_attention_heads _A : List[Any] = max_position_embeddings _A : str = hidden_act _A : Optional[Any] = layer_norm_eps _A : Optional[int] = attention_dropout _A : Optional[int] = initializer_range _A : Optional[int] = initializer_factor @classmethod def a__ ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _A , _A : Union[str, Any] = cls.get_config_dict(_a , **_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": _A : Dict = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_a , **_a ) class lowercase ( UpperCamelCase__ ): _a = "owlvit_vision_model" def __init__( self , _a=768 , _a=3072 , _a=12 , _a=12 , _a=3 , _a=768 , _a=32 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , **_a , ) -> Dict: super().__init__(**_a ) _A : int = hidden_size _A : Optional[int] = intermediate_size _A : Tuple = num_hidden_layers _A : Optional[Any] = num_attention_heads _A : Union[str, Any] = num_channels _A : Any = image_size _A : str = patch_size _A : List[Any] = hidden_act _A : Optional[int] = layer_norm_eps _A : Optional[int] = attention_dropout _A : List[str] = initializer_range _A : List[str] = initializer_factor @classmethod def a__ ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _A , _A : Tuple = cls.get_config_dict(_a , **_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": _A : List[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_a , **_a ) class lowercase ( UpperCamelCase__ ): _a = "owlvit" _a = True def __init__( self , _a=None , _a=None , _a=512 , _a=2.6592 , _a=True , **_a , ) -> Optional[Any]: super().__init__(**_a ) if text_config is None: _A : str = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: _A : int = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) _A : Dict = OwlViTTextConfig(**_a ) _A : int = OwlViTVisionConfig(**_a ) _A : Any = projection_dim _A : List[Any] = logit_scale_init_value _A : Any = return_dict _A : List[str] = 1.0 @classmethod def a__ ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _A , _A : Optional[Any] = cls.get_config_dict(_a , **_a ) if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_a , **_a ) @classmethod def a__ ( cls , _a , _a , **_a ) -> List[str]: _A : Dict = {} _A : Dict = text_config _A : Optional[Any] = vision_config return cls.from_dict(_a , **_a ) def a__ ( self ) -> List[Any]: _A : Dict = copy.deepcopy(self.__dict__ ) _A : Dict = self.text_config.to_dict() _A : Optional[Any] = self.vision_config.to_dict() _A : List[str] = self.__class__.model_type return output class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def a__ ( self ) -> float: return 1e-4 def a__ ( self , _a , _a = -1 , _a = -1 , _a = None , ) -> Mapping[str, Any]: _A : Tuple = super().generate_dummy_inputs( processor.tokenizer , batch_size=_a , seq_length=_a , framework=_a ) _A : Any = super().generate_dummy_inputs( processor.image_processor , batch_size=_a , framework=_a ) return {**text_input_dict, **image_input_dict} @property def a__ ( self ) -> int: return 14
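A minimal sketch of composing the text and vision sub-configs into the joint config, assuming the upstream transformers class names (OwlViTTextConfig, OwlViTVisionConfig, OwlViTConfig) that this file mirrors:

from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_cfg = OwlViTTextConfig()                       # defaults as in the __init__ above
vision_cfg = OwlViTVisionConfig(patch_size=32)
config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
print(config.projection_dim)                        # 512 by default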
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
26
1
from __future__ import annotations def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[str] = [] _A , _A : int = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) _A : List[Any] = result + left + right return input_list def lowerCAmelCase_ ( snake_case_ ): if len(snake_case_ ) <= 1: return input_list _A : Dict = list(snake_case_ ) # iteration for two-way merging _A : Union[str, Any] = 2 while p <= len(snake_case_ ): # getting low, high and middle value for merge-sort of single list for i in range(0,len(snake_case_ ),snake_case_ ): _A : Tuple = i _A : Optional[Any] = i + p - 1 _A : Optional[int] = (low + high + 1) // 2 _A : Dict = merge(snake_case_,snake_case_,snake_case_,snake_case_ ) # final merge of last two parts if p * 2 >= len(snake_case_ ): _A : Dict = i _A : str = merge(snake_case_,0,snake_case_,len(snake_case_ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _snake_case = input("Enter numbers separated by a comma:\n").strip() if user_input == "": _snake_case = [] else: _snake_case = [int(item.strip()) for item in user_input.split(",")] print(iter_merge_sort(unsorted))
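# --- Illustrative sketch: a self-contained restatement of the sample above, with assumed descriptive names. ---
# The sample implements a bottom-up (iterative) two-way merge sort, but the dump's placeholder
# identifiers (_A, snake_case_) hide the data flow. The names below are illustrative, not the
# sample's own; the logic mirrors the same algorithm.
def merge(seq: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs seq[low:mid] and seq[mid:high + 1] back into seq."""
    result = []
    left, right = seq[low:mid], seq[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    seq[low : high + 1] = result + left + right
    return seq


def iter_merge_sort(seq: list) -> list:
    """Bottom-up merge sort: merge runs of doubling width until the list is sorted."""
    if len(seq) <= 1:
        return seq
    seq = list(seq)
    width = 2
    while width <= len(seq):
        for i in range(0, len(seq), width):
            low, high = i, i + width - 1
            mid = (low + high + 1) // 2
            seq = merge(seq, low, mid, high)
        if width * 2 >= len(seq):  # final merge of the last two runs
            seq = merge(seq, 0, i, len(seq) - 1)
            break
        width *= 2
    return seq


print(iter_merge_sort([5, 3, 8, 1, 9]))  # expected: [1, 3, 5, 8, 9]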
26
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
26
1
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib _snake_case = get_logger() _snake_case = None class lowercase ( TensorFormatter[Mapping, "jax.Array", Mapping] ): def __init__( self , _a=None , _a=None , **_a ) -> Dict: super().__init__(features=_a ) import jax from jaxlib.xla_client import Device if isinstance(_a , _a ): raise ValueError( F'''Expected {device} to be a `str` not {type(_a )}, as `jaxlib.xla_extension.Device` ''' """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """ """the device with `str()` to get its string identifier that will be internally mapped """ """to the actual `jaxlib.xla_extension.Device`.""" ) _A : Tuple = device if isinstance(_a , _a ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _A : int = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( F'''Device with string identifier {self.device} not listed among the available ''' F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' F'''device: {str(jax.devices()[0] )}.''' ) _A : Dict = str(jax.devices()[0] ) _A : int = jnp_array_kwargs @staticmethod def a__ ( ) -> Dict[str, "jaxlib.xla_extension.Device"]: import jax return {str(_a ): device for device in jax.devices()} def a__ ( self , _a ) -> str: import jax import jax.numpy as jnp if isinstance(_a , _a ) and column: if all( isinstance(_a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(_a , axis=0 ) return column def a__ ( self , _a ) -> Optional[int]: import jax import jax.numpy as jnp if isinstance(_a , (str, bytes, type(_a )) ): return value elif isinstance(_a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() _A : Any = {} if isinstance(_a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: _A : List[str] = {"""dtype""": jnp.intaa} else: _A : Optional[int] = {"""dtype""": jnp.intaa} elif isinstance(_a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): _A : List[str] = {"""dtype""": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(_a , PIL.Image.Image ): _A : Union[str, Any] = np.asarray(_a ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _A : Optional[Any] = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(_a , **{**default_dtype, **self.jnp_array_kwargs} ) def a__ ( self , _a ) -> Optional[int]: import jax # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(_a , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(_a , """__array__""" ) and not isinstance(_a , jax.Array ): _A : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(_a , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(_a ) for substruct in data_struct] ) elif isinstance(_a , (list, tuple) ): return self._consolidate([self.recursive_tensorize(_a ) for substruct in data_struct] ) return self._tensorize(_a ) def a__ ( self , _a ) -> Tuple: return map_nested(self._recursive_tensorize , _a , map_list=_a ) def a__ ( self , _a ) -> Mapping: _A : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_a ) _A : Any = self.python_features_decoder.decode_row(_a ) return self.recursive_tensorize(_a ) def a__ ( self , _a ) -> "jax.Array": _A : Optional[int] = self.numpy_arrow_extractor().extract_column(_a ) _A : List[Any] = self.python_features_decoder.decode_column(_a , pa_table.column_names[0] ) _A : List[Any] = self.recursive_tensorize(_a ) _A : Optional[int] = self._consolidate(_a ) return column def a__ ( self , _a ) -> Mapping: _A : List[Any] = self.numpy_arrow_extractor().extract_batch(_a ) _A : List[Any] = self.python_features_decoder.decode_batch(_a ) _A : Tuple = self.recursive_tensorize(_a ) for column_name in batch: _A : Union[str, Any] = self._consolidate(batch[column_name] ) return batch
26
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowercase ( UpperCamelCase__,UpperCamelCase__ ): _a = 1 @register_to_config def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]: _A : Dict = None _A : List[Any] = None _A : Dict = None def a__ ( self , _a , _a = None ) -> Union[str, Any]: _A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a ) def a__ ( self , _a , _a , _a , _a=None ) -> Dict: if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score _A : Any = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) _A : List[str] = std.flatten() while len(std.shape ) < len(score.shape ): _A : List[Any] = std.unsqueeze(-1 ) _A : int = -score / std # compute _A : Tuple = -1.0 / len(self.timesteps ) _A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _A : List[str] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): _A : Union[str, Any] = beta_t.unsqueeze(-1 ) _A : Tuple = -0.5 * beta_t * x _A : Tuple = torch.sqrt(_a ) _A : Dict = drift - diffusion**2 * score _A : Dict = x + drift * dt # add noise _A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype ) _A : str = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ) -> Optional[Any]: return self.config.num_train_timesteps
26
1
from importlib import import_module from .logging import get_logger UpperCAmelCase__ = get_logger(__name__) class lowercase_ : '''simple docstring''' def __init__( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str=None ) ->int: """simple docstring""" a = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self , __UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) a = module._original_module if isinstance(__UpperCAmelCase , _PatchedModuleObj ) else module class lowercase_ : '''simple docstring''' __snake_case = [] def __init__( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=None ) ->Union[str, Any]: """simple docstring""" a = obj a = target a = new a = target.split('''.''' )[0] a = {} a = attrs or [] def __enter__( self : Tuple ) ->Union[str, Any]: """simple docstring""" *a , a = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__UpperCAmelCase ) ): try: a = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): a = getattr(self.obj , __UpperCAmelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): a = obj_attr # patch at top level setattr(self.obj , __UpperCAmelCase , _PatchedModuleObj(__UpperCAmelCase , attrs=self.attrs ) ) a = getattr(self.obj , __UpperCAmelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__UpperCAmelCase , __UpperCAmelCase , _PatchedModuleObj(getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , attrs=self.attrs ) ) a = getattr(__UpperCAmelCase , __UpperCAmelCase ) # finally set the target attribute setattr(__UpperCAmelCase , __UpperCAmelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: a = getattr(import_module('''.'''.join(__UpperCAmelCase ) ) , __UpperCAmelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , __UpperCAmelCase ) is attr_value: a = getattr(self.obj , __UpperCAmelCase ) setattr(self.obj , __UpperCAmelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open" a = globals()['''__builtins__'''][target_attr] setattr(self.obj , __UpperCAmelCase , self.new ) else: raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self : List[str] , *__UpperCAmelCase : Optional[int] ) ->Tuple: """simple docstring""" for attr in list(self.original ): setattr(self.obj , __UpperCAmelCase , self.original.pop(__UpperCAmelCase ) ) def __lowerCAmelCase ( self : Any ) ->List[Any]: """simple docstring""" self.__enter__() self._active_patches.append(self ) def __lowerCAmelCase ( self : Any ) ->Union[str, Any]: """simple docstring""" try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
26
0
'''simple docstring''' import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_: Dict =get_tests_dir('fixtures/dummy-config.json') class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = 0 def _lowercase (self : Union[str, Any] ): self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) ) def _lowercase (self : int ): UpperCAmelCase_ = AutoConfig.from_pretrained("bert-base-uncased" ) self.assertIsInstance(__a , __a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = AutoConfig.for_model("roberta" ) self.assertIsInstance(__a , __a ) def _lowercase (self : List[str] ): with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. UpperCAmelCase_ = os.path.join(__a , "fake-roberta" ) os.makedirs(__a , exist_ok=__a ) with open(os.path.join(__a , "config.json" ) , "w" ) as f: f.write(json.dumps({} ) ) UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertEqual(type(__a ) , __a ) def _lowercase (self : str ): try: AutoConfig.register("custom" , __a ) # Wrong model type will raise an error with self.assertRaises(__a ): AutoConfig.register("model" , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoConfig.register("bert" , __a ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase_ = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a ) UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def _lowercase (self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase_ = AutoConfig.from_pretrained("bert-base" ) def _lowercase (self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase_ = AutoConfig.from_pretrained(__a , revision="aaaaaa" ) def _lowercase (self : Tuple ): with self.assertRaisesRegex( __a , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ): UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" ) def _lowercase (self : Any ): # If remote code is not set, we will time out when asking whether to load the model. 
with self.assertRaises(__a ): UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a ) UpperCAmelCase_ = AutoConfig.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" ) def _lowercase (self : Tuple ): class __A ( UpperCamelCase__ ): a__ : str = """new-model""" try: AutoConfig.register("new-model" , __a ) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote code is disabled, we load the local one. UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
1
from math import asin, atan, cos, radians, sin, sqrt, tan _snake_case = 6_3_7_8_1_3_7.0 _snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5 _snake_case = 6378137 def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : Any = (AXIS_A - AXIS_B) / AXIS_A _A : Optional[int] = atan((1 - flattening) * tan(radians(snake_case_ ) ) ) _A : List[str] = atan((1 - flattening) * tan(radians(snake_case_ ) ) ) _A : Optional[Any] = radians(snake_case_ ) _A : str = radians(snake_case_ ) # Equation _A : Dict = sin((phi_a - phi_a) / 2 ) _A : List[str] = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda _A : Optional[int] = sqrt(sin_sq_phi + (cos(snake_case_ ) * cos(snake_case_ ) * sin_sq_lambda) ) return 2 * RADIUS * asin(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod()
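# --- Illustrative sketch: a self-contained restatement of the sample above, with assumed descriptive names. ---
# The sample computes a flattening-corrected haversine great-circle distance on the WGS-84
# ellipsoid; the placeholder argument names hide which inputs are latitudes and which are
# longitudes. The names and example coordinates below are illustrative (roughly San Francisco
# and New York City, in degrees), not taken from the sample.
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0       # WGS-84 semi-major axis, metres
AXIS_B = 6356752.314245  # WGS-84 semi-minor axis, metres
RADIUS = 6378137         # sphere radius used by the approximation, metres


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres between two (latitude, longitude) points given in degrees."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced (parametric) latitudes compensate for the Earth's flattening.
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1, lambda_2 = radians(lon1), radians(lon2)
    sin_sq_phi = sin((phi_2 - phi_1) / 2) ** 2
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2) ** 2
    return 2 * RADIUS * asin(sqrt(sin_sq_phi + cos(phi_1) * cos(phi_2) * sin_sq_lambda))


# Roughly 4.1e6 metres (about 4,100 km) between the two example points.
print(haversine_distance(37.774856, -122.424227, 40.713019, -74.012647))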
26
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = tempfile.mkdtemp() lowercase__ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowercase__ = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } lowercase__ = os.path.join(self.tmpdirname , UpperCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Tuple , **UpperCamelCase : Optional[int] ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : Tuple , **UpperCamelCase : List[str] ): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : List[Any] , **UpperCamelCase : Optional[int] ): '''simple docstring''' return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : str ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = self.get_image_processor() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase ) lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowercase__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 ) lowercase__ = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = self.prepare_image_inputs() lowercase__ = image_processor(UpperCamelCase , return_tensors='''np''' ) lowercase__ = processor(images=UpperCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = '''lower newer''' lowercase__ = processor(text=UpperCamelCase ) lowercase__ = tokenizer(UpperCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = '''lower newer''' lowercase__ = self.prepare_image_inputs() lowercase__ = processor(text=UpperCamelCase , images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase ): processor() def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ = processor.batch_decode(UpperCamelCase ) lowercase__ = tokenizer.batch_decode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = '''lower newer''' lowercase__ = self.prepare_image_inputs() lowercase__ = processor(text=UpperCamelCase , 
images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
26
0
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
26
0
'''simple docstring''' from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class UpperCAmelCase_ ( __lowercase ): def __lt__( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> List[Any]: return self[-1] < other[-1] def __eq__( self : str , UpperCAmelCase__ : List[str] ) -> Tuple: return self[-1] == other[-1] def a_ ( lowerCamelCase : list ): lowerCAmelCase = [] # sort into stacks for element in collection: lowerCAmelCase = Stack([element] ) lowerCAmelCase = bisect_left(lowerCamelCase , lowerCamelCase ) if i != len(lowerCamelCase ): stacks[i].append(lowerCamelCase ) else: stacks.append(lowerCamelCase ) # use a heap-based merge to merge stack efficiently lowerCAmelCase = merge(*(reversed(lowerCamelCase ) for stack in stacks) ) return collection if __name__ == "__main__": __snake_case =input("""Enter numbers separated by a comma:\n""").strip() __snake_case =[int(item) for item in user_input.split(""",""")] print(patience_sort(unsorted))
4
def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) if n == 0: return 0 _A : Tuple = float("""-inf""" ) for i in range(1,n + 1 ): _A : str = max( snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) ) return max_revue def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) _A : Dict = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _A : List[str] = float("""-inf""" ) for i in range(1,n + 1 ): _A : Optional[Any] = max( snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),) _A : Tuple = max_revenue return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _A : Any = 0 for i in range(1,n + 1 ): _A : Optional[Any] = max_rev[i] for j in range(1,i + 1 ): _A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] ) _A : int = max_revenue_i return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): if n < 0: _A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(snake_case_ ) if n > len(snake_case_ ): _A : Any = ( """Each integral piece of rod must have a corresponding price. """ f'''Got n = {n} but length of prices = {len(snake_case_ )}''' ) raise ValueError(snake_case_ ) def lowerCAmelCase_ ( ): _A : Tuple = [6, 10, 12, 15, 20, 23] _A : List[Any] = len(snake_case_ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _A : Any = 36 _A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ ) _A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ ) _A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
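# --- Illustrative sketch: a self-contained restatement of the sample above, with assumed descriptive names. ---
# The sample solves the classic rod-cutting problem three ways (naive recursion, top-down
# memoisation, bottom-up dynamic programming). The bottom-up recurrence is
# max_rev[i] = max over 1 <= j <= i of prices[j - 1] + max_rev[i - j]; the restatement below
# uses illustrative names and reproduces the sample's own test case.
def bottom_up_cut_rod(prices: list, n: int) -> int:
    """Best revenue obtainable from a rod of length n, given prices[k - 1] for a piece of length k."""
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0
    for i in range(1, n + 1):
        best = max_rev[i]
        for j in range(1, i + 1):
            best = max(best, prices[j - 1] + max_rev[i - j])
        max_rev[i] = best
    return max_rev[n]


# Six pieces of length 1 at price 6 each give the optimum of 36, matching the sample's assertion.
print(bottom_up_cut_rod([6, 10, 12, 15, 20, 23], 6))  # expected: 36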
26
0
from random import shuffle import tensorflow as tf from numpy import array def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Dict: """simple docstring""" _lowercase =int(__snake_case ) assert noofclusters < len(__snake_case ) # Find out the dimensionality _lowercase =len(vectors[0] ) # Will help select random centroids from among the available vectors _lowercase =list(range(len(__snake_case ) ) ) shuffle(__snake_case ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. _lowercase =tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION _lowercase =tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points _lowercase =[ tf.Variable(vectors[vector_indices[i]] ) for i in range(__snake_case ) ] ##These nodes will assign the centroid Variables the appropriate ##values _lowercase =tf.placeholder('''float64''' , [dim] ) _lowercase =[] for centroid in centroids: cent_assigns.append(tf.assign(__snake_case , __snake_case ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) _lowercase =[tf.Variable(0 ) for i in range(len(__snake_case ) )] ##These nodes will assign an assignment Variable the appropriate ##value _lowercase =tf.placeholder('''int32''' ) _lowercase =[] for assignment in assignments: cluster_assigns.append(tf.assign(__snake_case , __snake_case ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input _lowercase =tf.placeholder('''float''' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors _lowercase =tf.reduce_mean(__snake_case , 0 ) ##Node for computing Euclidean distances # Placeholders for input _lowercase =tf.placeholder('''float''' , [dim] ) _lowercase =tf.placeholder('''float''' , [dim] ) _lowercase =tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__snake_case , __snake_case ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input _lowercase =tf.placeholder('''float''' , [noofclusters] ) _lowercase =tf.argmin(__snake_case , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. _lowercase =tf.initialize_all_variables() # Initialize all variables sess.run(__snake_case ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. _lowercase =100 for _ in range(__snake_case ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(__snake_case ) ): _lowercase =vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
_lowercase =[ sess.run(__snake_case , feed_dict={va: vect, va: sess.run(__snake_case )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input _lowercase =sess.run( __snake_case , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(__snake_case ): # Collect all the vectors assigned to this cluster _lowercase =[ vectors[i] for i in range(len(__snake_case ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location _lowercase =sess.run( __snake_case , feed_dict={mean_input: array(__snake_case )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments _lowercase =sess.run(__snake_case ) _lowercase =sess.run(__snake_case ) return centroids, assignments
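# --- Illustrative sketch: a point of comparison for the sample above, not the sample's API. ---
# The sample builds Lloyd's k-means as a TensorFlow 1.x graph (tf.Session, placeholders,
# tf.initialize_all_variables), alternating an expectation step (assign each vector to its
# nearest centroid) with a maximization step (move each centroid to its cluster mean).
# The eager NumPy restatement below shows the same loop without the graph machinery;
# all names and the toy data are assumed for illustration.
import numpy as np


def kmeans(vectors: np.ndarray, noofclusters: int, noofiterations: int = 100, seed: int = 0):
    """Return (centroids, assignments) after a fixed number of E/M iterations."""
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=noofclusters, replace=False)].astype(float)
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(noofiterations):
        # Expectation: nearest centroid by Euclidean distance.
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization: recompute each centroid as the mean of its assigned vectors.
        for c in range(noofclusters):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments


# Two well-separated blobs collapse onto two centroids.
data = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
print(kmeans(data, 2))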
5
import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case_ = "AAPL" ): _A : str = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' _A : List[Any] = BeautifulSoup(requests.get(snake_case_ ).text,"""html.parser""" ) _A : Union[str, Any] = """My(6px) Pos(r) smartphone_Mt(6px)""" return soup.find("""div""",class_=class_ ).find("""span""" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
26
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A : List[str] = '▁' A : str = {'vocab_file': 'spiece.model'} A : Dict = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'} } A : Tuple = { 'google/pegasus-xsum': 5_1_2, } A : Tuple = logging.get_logger(__name__) class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self , _snake_case , _snake_case="<pad>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<mask_2>" , _snake_case="<mask_1>" , _snake_case=None , _snake_case=103 , _snake_case = None , **_snake_case , ) -> None: '''simple docstring''' __a = offset if additional_special_tokens is not None: if not isinstance(_snake_case , _snake_case ): raise TypeError( F"""additional_special_tokens should be of type {type(_snake_case )}, but is""" F""" {type(_snake_case )}""" ) __a = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"""<unk_{i}>""" for i in range(len(_snake_case ) , self.offset - 1 ) ] if len(set(_snake_case ) ) != len(_snake_case ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' F""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) __a = additional_special_tokens_extended else: __a = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )] __a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_snake_case , unk_token=_snake_case , mask_token=_snake_case , pad_token=_snake_case , mask_token_sent=_snake_case , offset=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) __a = mask_token_sent __a = vocab_file __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # add special tokens to encoder dict __a = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __a = {v: k for k, v in self.encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' return len(self.sp_model ) + self.offset def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, int]: '''simple docstring''' __a = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: '''simple docstring''' __a = self.__dict__.copy() __a = None return state def __setstate__( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __a = {} __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_snake_case , out_type=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __a = self.sp_model.piece_to_id(_snake_case ) return sp_id + self.offset def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str: '''simple docstring''' if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __a = self.sp_model.IdToPiece(index - self.offset ) return token def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Any: '''simple docstring''' __a = [] __a = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_snake_case ) + token __a = [] else: current_sub_tokens.append(_snake_case ) out_string += self.sp_model.decode(_snake_case ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> Any: '''simple docstring''' return 1 def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]: 
'''simple docstring''' if already_has_special_tokens: return self._special_token_mask(_snake_case ) elif token_ids_a is None: return self._special_token_mask(_snake_case ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_snake_case ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __a = os.path.join( _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case , '''wb''' ) as fi: __a = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
6
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
26
0
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowercase_ = 128022 lowercase_ = 128028 @require_sentencepiece class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = MaMaaaTokenizer lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True def snake_case__ ( self : Tuple )-> Dict: '''simple docstring''' super().setUp() A__ = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] A__ = dict(zip(lowercase_,range(len(lowercase_ ) ) ) ) A__ = Path(self.tmpdirname ) save_json(lowercase_,save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowercase_,save_dir / VOCAB_FILES_NAMES['spm_file'] ) A__ = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : Tuple,**lowercase_ : Any )-> Any: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname,**lowercase_ ) def snake_case__ ( self : Dict,lowercase_ : List[Any] )-> List[str]: '''simple docstring''' return ( "This is a test", "This is a test", ) def snake_case__ ( self : Tuple )-> Optional[Any]: '''simple docstring''' A__ = '</s>' A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0],'</s>' ) self.assertEqual(vocab_keys[1],'<unk>' ) self.assertEqual(vocab_keys[-1],'<s>' ) self.assertEqual(len(lowercase_ ),tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('Skip this test while all models are still to be uploaded.' 
) def snake_case__ ( self : str )-> str: '''simple docstring''' pass def snake_case__ ( self : List[Any] )-> Tuple: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ),[2, 3, 4, 5, 6],) A__ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] ) A__ = tokenizer.convert_tokens_to_string(lowercase_ ) self.assertEqual(lowercase_,'This is a test' ) @slow def snake_case__ ( self : Dict )-> Union[str, Any]: '''simple docstring''' A__ = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='facebook/m2m100_418M',revision='c168bae485c864188cf9aa0e4108b0b6934dc91e',) @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): """simple docstring""" lowerCamelCase = 'facebook/m2m100_418M' lowerCamelCase = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def snake_case__ ( cls : Optional[Any] )-> Optional[Any]: '''simple docstring''' A__ = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name,src_lang='en',tgt_lang='fr' ) A__ = 1 return cls def snake_case__ ( self : Union[str, Any] )-> List[str]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('ar' ),1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id('en' ),1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id('ro' ),1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id('mr' ),1_2_8_0_6_3 ) def snake_case__ ( self : Any )-> Optional[int]: '''simple docstring''' A__ = self.tokenizer.get_vocab() self.assertEqual(len(lowercase_ ),self.tokenizer.vocab_size ) self.assertEqual(vocab['<unk>'],3 ) self.assertIn(self.tokenizer.get_lang_token('en' ),lowercase_ ) def snake_case__ ( self : Union[str, Any] )-> int: '''simple docstring''' A__ = 'en' A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens,lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' self.assertIn(lowercase_,self.tokenizer.all_special_ids ) # fmt: off A__ = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on A__ = self.tokenizer.decode(lowercase_,skip_special_tokens=lowercase_ ) A__ = self.tokenizer.decode(generated_ids[1:],skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_,lowercase_ ) self.assertNotIn(self.tokenizer.eos_token,lowercase_ ) def snake_case__ ( self : List[str] )-> int: '''simple docstring''' A__ = tempfile.mkdtemp() A__ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowercase_ ) A__ = MaMaaaTokenizer.from_pretrained(lowercase_ ) self.assertDictEqual(new_tok.lang_token_to_id,lowercase_ ) @require_torch def snake_case__ ( self : List[Any] )-> List[Any]: '''simple docstring''' A__ = 'en' A__ = 'fr' A__ = self.tokenizer(self.src_text,text_target=self.tgt_text,padding=lowercase_,return_tensors='pt' ) A__ = shift_tokens_right( batch['labels'],self.tokenizer.pad_token_id,self.tokenizer.eos_token_id ) for k in batch: A__ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def snake_case__ ( self : Optional[Any] )-> Union[str, Any]: '''simple docstring''' A__ = 'mr' 
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) A__ = 'zh' self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) @require_torch def snake_case__ ( self : Optional[Any] )-> List[str]: '''simple docstring''' A__ = 'mr' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) A__ = 'zh' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def snake_case__ ( self : Union[str, Any] )-> Any: '''simple docstring''' A__ = self.tokenizer._build_translation_inputs('A test',return_tensors='pt',src_lang='en',tgt_lang='ar' ) self.assertEqual( nested_simplify(lowercase_ ),{ # en_XX, A, test, EOS 'input_ids': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 1_2_8_0_0_6, },)
7
def euclidean_gcd(a: int, b: int) -> int:
    # Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b).
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    # Recursive form of the same algorithm.
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
26
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = "dandelin/vilt-b32-finetuned-vqa" SCREAMING_SNAKE_CASE : str = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." ) SCREAMING_SNAKE_CASE : Any = "image_qa" SCREAMING_SNAKE_CASE : str = AutoProcessor SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForVisualQuestionAnswering SCREAMING_SNAKE_CASE : Optional[int] = ["image", "text"] SCREAMING_SNAKE_CASE : List[Any] = ["text"] def __init__( self : Optional[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[Any]: requires_backends(self , ['''vision'''] ) super().__init__(*_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : "Image" , _UpperCamelCase : str ) ->Union[str, Any]: return self.pre_processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' ) def snake_case__( self : Optional[int] , _UpperCamelCase : Dict ) ->int: with torch.no_grad(): return self.model(**_UpperCamelCase ).logits def snake_case__( self : Optional[Any] , _UpperCamelCase : Tuple ) ->Tuple: snake_case_ = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
8
def is_power_of_two(number: int) -> bool:
    """Check whether ``number`` is a power of two using the ``n & (n - 1)`` bit trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
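# Illustrative usage sketch (an editorial addition, not part of the original sample);
# it assumes the helper above keeps the name ``is_power_of_two`` used in this rewrite.
def _demo_is_power_of_two() -> None:
    for n in (1, 2, 3, 16, 18, 1024):
        print(f"is_power_of_two({n}) = {is_power_of_two(n)}")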
26
0
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
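# Illustrative usage sketch (an editorial addition, not part of the original sample);
# it assumes the ``TreeNode`` and ``is_binary_search_tree`` names used in this rewrite.
def _demo_is_binary_search_tree() -> None:
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(5.0), TreeNode(3.0))
    print(is_binary_search_tree(valid))    # expected: True
    print(is_binary_search_tree(invalid))  # expected: False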
9
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
26
0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __A = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __A = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } __A = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } __A = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } __A = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } __A = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } __A = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } __A = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __A = collections.namedtuple( "DPRSpanPrediction", 
["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) __A = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) __A = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __call__(self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Union[bool, str] = False , UpperCAmelCase_ : Union[bool, str] = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Any , ) ->BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , ) elif titles is None or texts is None: lowerCamelCase__: Optional[int] =titles if texts is None else texts return super().__call__( UpperCAmelCase_ , UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: List[str] =titles if not isinstance(UpperCAmelCase_ , UpperCAmelCase_) else [titles] lowerCamelCase__: List[str] =texts if not isinstance(UpperCAmelCase_ , UpperCAmelCase_) else [texts] lowerCamelCase__: int =len(UpperCAmelCase_) lowerCamelCase__: List[str] =questions if not isinstance(UpperCAmelCase_ , UpperCAmelCase_) else [questions] * n_passages if len(UpperCAmelCase_) != len(UpperCAmelCase_): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCAmelCase_)} titles and {len(UpperCAmelCase_)} texts.""") lowerCamelCase__: int =super().__call__(UpperCAmelCase_ , UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_)["input_ids"] lowerCamelCase__: List[Any] =super().__call__(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_)["input_ids"] lowerCamelCase__: Any ={ "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCAmelCase_ , UpperCAmelCase_) ] } if return_attention_mask is not False: lowerCamelCase__: Any =[] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) lowerCamelCase__: Dict =attention_mask return self.pad(UpperCAmelCase_ , padding=UpperCAmelCase_ , 
max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : BatchEncoding , UpperCAmelCase_ : DPRReaderOutput , UpperCAmelCase_ : int = 16 , UpperCAmelCase_ : int = 64 , UpperCAmelCase_ : int = 4 , ) ->List[DPRSpanPrediction]: '''simple docstring''' lowerCamelCase__: Any =reader_input["input_ids"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =reader_output[:3] lowerCamelCase__: Optional[int] =len(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =sorted(range(UpperCAmelCase_) , reverse=UpperCAmelCase_ , key=relevance_logits.__getitem__) lowerCamelCase__: List[DPRReaderOutput] =[] for doc_id in sorted_docs: lowerCamelCase__: Optional[Any] =list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence lowerCamelCase__: List[str] =sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase__: Optional[Any] =sequence_ids.index(self.pad_token_id) else: lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase_ , top_spans=UpperCAmelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase_ , start_index=UpperCAmelCase_ , end_index=UpperCAmelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(UpperCAmelCase_) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , ) ->List[DPRSpanPrediction]: '''simple docstring''' lowerCamelCase__: str =[] for start_index, start_score in enumerate(UpperCAmelCase_): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_: x[1] , reverse=UpperCAmelCase_) lowerCamelCase__: List[str] =[] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""") lowerCamelCase__: int =end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""") if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(UpperCAmelCase_) == top_spans: break return chosen_span_intervals @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = READER_PRETRAINED_VOCAB_FILES_MAP lowercase_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = READER_PRETRAINED_INIT_CONFIGURATION lowercase_ = ["input_ids", "attention_mask"]
10
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
26
0
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor lowerCAmelCase__ = random.Random() def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int]=1.0 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ): if rng is None: _A : Dict = global_rng _A : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=7 , __lowerCamelCase=4_0_0 , __lowerCamelCase=2_0_0_0 , __lowerCamelCase=2_4 , __lowerCamelCase=2_4 , __lowerCamelCase=0.0 , __lowerCamelCase=1_6_0_0_0 , __lowerCamelCase=True , __lowerCamelCase=True , ) -> Tuple: _A : Tuple = parent _A : Any = batch_size _A : List[Any] = min_seq_length _A : List[Any] = max_seq_length _A : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _A : Optional[Any] = feature_size _A : List[Any] = num_mel_bins _A : Optional[int] = padding_value _A : List[Any] = sampling_rate _A : List[Any] = return_attention_mask _A : List[str] = do_normalize def _lowerCamelCase ( self) -> List[Any]: return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowerCamelCase ( self , __lowerCamelCase=False , __lowerCamelCase=False) -> Union[str, Any]: def _flatten(__lowerCamelCase): return list(itertools.chain(*__lowerCamelCase)) if equal_length: _A : List[Any] = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size _A : List[Any] = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff) ] if numpify: _A : str = [np.asarray(__lowerCamelCase) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCAmelCase__ ( a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = SpeechaTextFeatureExtractor if is_speech_available() else None def _lowerCamelCase ( self) -> Any: _A : Dict = SpeechaTextFeatureExtractionTester(self) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: self.assertTrue(np.all(np.mean(__lowerCamelCase , axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(__lowerCamelCase , axis=0) - 1) < 1e-3)) def _lowerCamelCase ( self) -> Dict: # Tests that all call wrap to encode_plus and batch_encode_plus _A : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 _A : List[str] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _A : Any = [np.asarray(__lowerCamelCase) for speech_input in speech_inputs] # Test feature size _A : List[Any] = feature_extractor(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not 
batched input _A : Optional[int] = feature_extractor(speech_inputs[0] , return_tensors="np").input_features _A : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np").input_features self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3)) # Test batched _A : Optional[int] = feature_extractor(__lowerCamelCase , return_tensors="np").input_features _A : Optional[int] = feature_extractor(__lowerCamelCase , return_tensors="np").input_features for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase): self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3)) # Test 2-D numpy arrays are batched. _A : int = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)] _A : Optional[Any] = np.asarray(__lowerCamelCase) _A : Dict = feature_extractor(__lowerCamelCase , return_tensors="np").input_features _A : Union[str, Any] = feature_extractor(__lowerCamelCase , return_tensors="np").input_features for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase): self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3)) def _lowerCamelCase ( self) -> Dict: _A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _A : int = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _A : int = ["longest", "max_length", "do_not_pad"] _A : int = [None, 1_6, None] for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase): _A : Optional[Any] = feature_extractor( __lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_attention_mask=__lowerCamelCase) _A : Union[str, Any] = inputs.input_features _A : int = inputs.attention_mask _A : List[str] = [np.sum(__lowerCamelCase) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def _lowerCamelCase ( self) -> Optional[int]: _A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _A : int = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _A : Any = ["longest", "max_length", "do_not_pad"] _A : str = [None, 1_6, None] for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase): _A : Any = feature_extractor( __lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase) _A : Dict = inputs.input_features _A : str = inputs.attention_mask _A : int = [np.sum(__lowerCamelCase) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def _lowerCamelCase ( self) -> Dict: _A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _A : Optional[int] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _A : Tuple = feature_extractor( __lowerCamelCase , padding="max_length" , max_length=4 , truncation=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase , ) _A : Tuple = inputs.input_features _A : Optional[int] = 
inputs.attention_mask _A : Optional[Any] = np.sum(attention_mask == 1 , axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1]) self._check_zero_mean_unit_variance(input_features[2]) def _lowerCamelCase ( self) -> Dict: _A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _A : Union[str, Any] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _A : Optional[int] = feature_extractor( __lowerCamelCase , padding="longest" , max_length=4 , truncation=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase , ) _A : List[Any] = inputs.input_features _A : int = inputs.attention_mask _A : Tuple = np.sum(attention_mask == 1 , axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 2_4)) _A : List[str] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _A : List[Any] = feature_extractor( __lowerCamelCase , padding="longest" , max_length=1_6 , truncation=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase , ) _A : Optional[int] = inputs.input_features _A : Tuple = inputs.attention_mask _A : List[str] = np.sum(attention_mask == 1 , axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 2_4)) def _lowerCamelCase ( self) -> str: import torch _A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _A : str = np.random.rand(1_0_0 , 3_2).astype(np.floataa) _A : Tuple = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _A : Dict = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.floataa) _A : Dict = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.floataa) def _lowerCamelCase ( self , __lowerCamelCase) -> str: from datasets import load_dataset _A : Union[str, Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation") # automatic decoding with librispeech _A : Dict = ds.sort("id").select(range(__lowerCamelCase))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _lowerCamelCase ( self) -> Any: # fmt: off _A : Dict = np.array([ -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1, -1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8, -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5, ]) # fmt: on _A : Union[str, Any] = self._load_datasamples(1) _A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _A : Tuple = feature_extractor(__lowerCamelCase , return_tensors="pt").input_features self.assertEquals(input_features.shape , (1, 5_8_4, 2_4)) self.assertTrue(np.allclose(input_features[0, 0, 
:3_0] , __lowerCamelCase , atol=1e-4))
11
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
26
0
import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class lowerCamelCase__( unittest.TestCase): UpperCAmelCase__ : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING UpperCAmelCase__ : int = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] ): __lowerCamelCase = AudioClassificationPipeline(model=UpperCamelCase_ , feature_extractor=UpperCamelCase_ ) # test with a raw waveform __lowerCamelCase = np.zeros((3_40_00,) ) __lowerCamelCase = np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Any ): __lowerCamelCase, __lowerCamelCase = examples __lowerCamelCase = audio_classifier(UpperCamelCase_ ) # by default a model is initialized with num_labels=2 self.assertEqual( UpperCamelCase_ , [ {"""score""": ANY(UpperCamelCase_ ), """label""": ANY(UpperCamelCase_ )}, {"""score""": ANY(UpperCamelCase_ ), """label""": ANY(UpperCamelCase_ )}, ] , ) __lowerCamelCase = audio_classifier(UpperCamelCase_ , top_k=1 ) self.assertEqual( UpperCamelCase_ , [ {"""score""": ANY(UpperCamelCase_ ), """label""": ANY(UpperCamelCase_ )}, ] , ) self.run_torchaudio(UpperCamelCase_ ) @require_torchaudio def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] ): import datasets # test with a local file __lowerCamelCase = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) __lowerCamelCase = dataset[0]["""audio"""]["""array"""] __lowerCamelCase = audio_classifier(UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ {"""score""": ANY(UpperCamelCase_ ), """label""": ANY(UpperCamelCase_ )}, {"""score""": ANY(UpperCamelCase_ ), """label""": ANY(UpperCamelCase_ )}, ] , ) @require_torch def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = """anton-l/wav2vec2-random-tiny-classifier""" __lowerCamelCase = pipeline("""audio-classification""" , model=UpperCamelCase_ ) __lowerCamelCase = np.ones((80_00,) ) __lowerCamelCase = audio_classifier(UpperCamelCase_ , top_k=4 ) __lowerCamelCase = [ {"""score""": 0.0842, """label""": """no"""}, {"""score""": 0.0838, """label""": """up"""}, {"""score""": 0.0837, """label""": """go"""}, {"""score""": 0.0834, """label""": """right"""}, ] __lowerCamelCase = [ {"""score""": 0.0845, """label""": """stop"""}, {"""score""": 0.0844, """label""": """on"""}, {"""score""": 0.0841, """label""": """right"""}, {"""score""": 0.0834, """label""": """left"""}, ] self.assertIn(nested_simplify(UpperCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) __lowerCamelCase = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate} __lowerCamelCase = audio_classifier(UpperCamelCase_ , top_k=4 ) self.assertIn(nested_simplify(UpperCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def lowerCAmelCase__ ( self: List[Any] ): import datasets __lowerCamelCase = """superb/wav2vec2-base-superb-ks""" __lowerCamelCase = pipeline("""audio-classification""" , model=UpperCamelCase_ ) __lowerCamelCase = 
datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" ) __lowerCamelCase = np.array(dataset[3]["""speech"""] , dtype=np.floataa ) __lowerCamelCase = audio_classifier(UpperCamelCase_ , top_k=4 ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=3 ) , [ {"""score""": 0.981, """label""": """go"""}, {"""score""": 0.007, """label""": """up"""}, {"""score""": 0.006, """label""": """_unknown_"""}, {"""score""": 0.001, """label""": """down"""}, ] , ) @require_tf @unittest.skip("""Audio classification is not implemented for TF""" ) def lowerCAmelCase__ ( self: Union[str, Any] ): pass
12
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: 1, 1/2, 1/3, ..., 1/n."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
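# Illustrative usage sketch (an editorial addition, not part of the original sample);
# it assumes the ``harmonic_series`` name used in this rewrite.
def _demo_harmonic_series() -> None:
    print(harmonic_series("4"))  # expected: ['1', '1/2', '1/3', '1/4']
    print(harmonic_series(""))   # expected: []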
26
0
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = XCLIPTextConfig() # derive patch size from model name SCREAMING_SNAKE_CASE_: int = model_name.find("patch" ) SCREAMING_SNAKE_CASE_: List[str] = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] ) SCREAMING_SNAKE_CASE_: Union[str, Any] = XCLIPVisionConfig(patch_size=_UpperCAmelCase , num_frames=_UpperCAmelCase ) if "large" in model_name: SCREAMING_SNAKE_CASE_: Union[str, Any] = 7_68 SCREAMING_SNAKE_CASE_: List[str] = 30_72 SCREAMING_SNAKE_CASE_: str = 12 SCREAMING_SNAKE_CASE_: int = 10_24 SCREAMING_SNAKE_CASE_: List[Any] = 40_96 SCREAMING_SNAKE_CASE_: str = 16 SCREAMING_SNAKE_CASE_: Dict = 24 SCREAMING_SNAKE_CASE_: Optional[Any] = 7_68 SCREAMING_SNAKE_CASE_: Any = 30_72 if model_name == "xclip-large-patch14-16-frames": SCREAMING_SNAKE_CASE_: List[Any] = 3_36 SCREAMING_SNAKE_CASE_: Optional[int] = XCLIPConfig.from_text_vision_configs(_UpperCAmelCase , _UpperCAmelCase ) if "large" in model_name: SCREAMING_SNAKE_CASE_: Optional[int] = 7_68 return config def A_ ( _UpperCAmelCase ): # text encoder if name == "token_embedding.weight": SCREAMING_SNAKE_CASE_: int = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" ) if name == "positional_embedding": SCREAMING_SNAKE_CASE_: Any = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" ) if "ln_1" in name: SCREAMING_SNAKE_CASE_: Union[str, Any] = name.replace("ln_1" , "layer_norm1" ) if "ln_2" in name: SCREAMING_SNAKE_CASE_: List[str] = name.replace("ln_2" , "layer_norm2" ) if "c_fc" in name: SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace("c_fc" , "fc1" ) if "c_proj" in name: SCREAMING_SNAKE_CASE_: Dict = name.replace("c_proj" , "fc2" ) if name.startswith("transformer.resblocks" ): SCREAMING_SNAKE_CASE_: Any = name.replace("transformer.resblocks" , "text_model.encoder.layers" ) if "attn.out_proj" in name and "message" not in name: SCREAMING_SNAKE_CASE_: str = name.replace("attn.out_proj" , "self_attn.out_proj" ) if "ln_final" in name: SCREAMING_SNAKE_CASE_: int = name.replace("ln_final" , "text_model.final_layer_norm" ) # visual encoder if name == "visual.class_embedding": SCREAMING_SNAKE_CASE_: Optional[int] = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" ) if name == "visual.positional_embedding": SCREAMING_SNAKE_CASE_: int = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" ) if name.startswith("visual.transformer.resblocks" ): SCREAMING_SNAKE_CASE_: str = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" ) if "visual.conv1" in name: SCREAMING_SNAKE_CASE_: List[Any] = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" ) if "visual.ln_pre" in name: SCREAMING_SNAKE_CASE_: Tuple = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" ) if "visual.ln_post" in name: SCREAMING_SNAKE_CASE_: Any = name.replace("visual.ln_post" , "vision_model.post_layernorm" ) if "visual.proj" in name: SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace("visual.proj" , "visual_projection.weight" ) if "text_projection" in name: SCREAMING_SNAKE_CASE_: Dict = name.replace("text_projection" , 
"text_projection.weight" ) # things on top if "prompts_visual_proj" in name: SCREAMING_SNAKE_CASE_: List[Any] = name.replace("prompts_visual_proj" , "prompts_visual_projection" ) if "prompts_visual_ln" in name: SCREAMING_SNAKE_CASE_: List[Any] = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" ) # mit if name == "mit.positional_embedding": SCREAMING_SNAKE_CASE_: str = name.replace("positional" , "position" ) if name.startswith("mit.resblocks" ): SCREAMING_SNAKE_CASE_: Optional[int] = name.replace("mit.resblocks" , "mit.encoder.layers" ) # prompts generator if name.startswith("prompts_generator.norm" ): SCREAMING_SNAKE_CASE_: Tuple = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" ) return name def A_ ( _UpperCAmelCase , _UpperCAmelCase ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE_: Optional[Any] = orig_state_dict.pop(_UpperCAmelCase ) if "attn.in_proj" in key: SCREAMING_SNAKE_CASE_: Optional[int] = key.split("." ) if key.startswith("visual" ): SCREAMING_SNAKE_CASE_: Optional[int] = key_split[3] SCREAMING_SNAKE_CASE_: Dict = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: SCREAMING_SNAKE_CASE_: Union[str, Any] = val[ :dim, : ] SCREAMING_SNAKE_CASE_: Dict = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE_: Any = val[ -dim:, : ] else: SCREAMING_SNAKE_CASE_: Dict = val[ :dim ] SCREAMING_SNAKE_CASE_: Optional[Any] = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE_: List[Any] = val[ -dim: ] else: if "weight" in key: SCREAMING_SNAKE_CASE_: List[str] = val[ :dim, : ] SCREAMING_SNAKE_CASE_: str = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE_: int = val[ -dim:, : ] else: SCREAMING_SNAKE_CASE_: Dict = val[:dim] SCREAMING_SNAKE_CASE_: int = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE_: Union[str, Any] = val[-dim:] elif key.startswith("mit" ): SCREAMING_SNAKE_CASE_: List[Any] = key_split[2] SCREAMING_SNAKE_CASE_: Tuple = config.vision_config.mit_hidden_size if "weight" in key: SCREAMING_SNAKE_CASE_: Tuple = val[:dim, :] SCREAMING_SNAKE_CASE_: Dict = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE_: Optional[int] = val[-dim:, :] else: SCREAMING_SNAKE_CASE_: List[Any] = val[:dim] SCREAMING_SNAKE_CASE_: List[str] = val[dim : dim * 2] SCREAMING_SNAKE_CASE_: str = val[-dim:] else: SCREAMING_SNAKE_CASE_: Any = key_split[2] SCREAMING_SNAKE_CASE_: Optional[Any] = config.text_config.hidden_size if "weight" in key: SCREAMING_SNAKE_CASE_: List[str] = val[:dim, :] SCREAMING_SNAKE_CASE_: List[Any] = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE_: List[str] = val[-dim:, :] else: SCREAMING_SNAKE_CASE_: Tuple = val[:dim] SCREAMING_SNAKE_CASE_: List[str] = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE_: Optional[Any] = val[-dim:] else: SCREAMING_SNAKE_CASE_: Union[str, Any] = rename_key(_UpperCAmelCase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: SCREAMING_SNAKE_CASE_: Dict = val.T SCREAMING_SNAKE_CASE_: Any = val return orig_state_dict def A_ ( _UpperCAmelCase ): if num_frames == 8: SCREAMING_SNAKE_CASE_: Union[str, Any] = "eating_spaghetti_8_frames.npy" elif num_frames == 16: SCREAMING_SNAKE_CASE_: str = "eating_spaghetti.npy" elif num_frames == 32: SCREAMING_SNAKE_CASE_: Union[str, Any] = "eating_spaghetti_32_frames.npy" SCREAMING_SNAKE_CASE_: List[Any] = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename=_UpperCAmelCase , repo_type="dataset" , ) SCREAMING_SNAKE_CASE_: str = np.load(_UpperCAmelCase ) return list(_UpperCAmelCase ) def A_ ( _UpperCAmelCase , _UpperCAmelCase=None , 
_UpperCAmelCase=False ): SCREAMING_SNAKE_CASE_: Optional[Any] = { # fully supervised kinetics-400 checkpoints "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth", "xclip-base-patch32-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth" ), "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth", "xclip-base-patch16-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth" ), "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb", "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f", # fully supervised kinetics-600 checkpoints "xclip-base-patch16-kinetics-600": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth" ), "xclip-base-patch16-kinetics-600-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth" ), "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be", # few shot "xclip-base-patch16-hmdb-2-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth" ), "xclip-base-patch16-hmdb-4-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth" ), "xclip-base-patch16-hmdb-8-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth" ), "xclip-base-patch16-hmdb-16-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth" ), "xclip-base-patch16-ucf-2-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth" ), "xclip-base-patch16-ucf-4-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth" ), "xclip-base-patch16-ucf-8-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth" ), "xclip-base-patch16-ucf-16-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth" ), # zero shot "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth", } SCREAMING_SNAKE_CASE_: Tuple = model_to_url[model_name] SCREAMING_SNAKE_CASE_: int = 8 if "16-frames" in model_name: SCREAMING_SNAKE_CASE_: int = 16 elif "shot" in model_name: SCREAMING_SNAKE_CASE_: int = 32 SCREAMING_SNAKE_CASE_: int = get_xclip_config(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] = XCLIPModel(_UpperCAmelCase ) model.eval() if "drive" in checkpoint_url: SCREAMING_SNAKE_CASE_: List[str] = "pytorch_model.bin" gdown.cached_download(_UpperCAmelCase , _UpperCAmelCase , quiet=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple = torch.load(_UpperCAmelCase , map_location="cpu" )["model"] else: SCREAMING_SNAKE_CASE_: str = torch.hub.load_state_dict_from_url(_UpperCAmelCase )["model"] SCREAMING_SNAKE_CASE_: Union[str, Any] = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = XCLIPModel(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) assert missing_keys == 
["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() SCREAMING_SNAKE_CASE_: Optional[int] = 3_36 if model_name == "xclip-large-patch14-16-frames" else 2_24 SCREAMING_SNAKE_CASE_: int = VideoMAEImageProcessor(size=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" ) SCREAMING_SNAKE_CASE_: Tuple = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" ) SCREAMING_SNAKE_CASE_: Optional[Any] = XCLIPProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = prepare_video(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = processor( text=["playing sports", "eating spaghetti", "go shopping"] , videos=_UpperCAmelCase , return_tensors="pt" , padding=_UpperCAmelCase ) print("Shape of pixel values:" , inputs.pixel_values.shape ) with torch.no_grad(): SCREAMING_SNAKE_CASE_: List[str] = model(**_UpperCAmelCase ) # Verify outputs SCREAMING_SNAKE_CASE_: Any = outputs.logits_per_video SCREAMING_SNAKE_CASE_: str = logits_per_video.softmax(dim=1 ) print("Probs:" , _UpperCAmelCase ) # kinetics-400 if model_name == "xclip-base-patch32": SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] ) elif model_name == "xclip-base-patch32-16-frames": SCREAMING_SNAKE_CASE_: Any = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] ) elif model_name == "xclip-base-patch16": SCREAMING_SNAKE_CASE_: int = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] ) elif model_name == "xclip-base-patch16-16-frames": SCREAMING_SNAKE_CASE_: Dict = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] ) elif model_name == "xclip-large-patch14": SCREAMING_SNAKE_CASE_: Optional[int] = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] ) elif model_name == "xclip-large-patch14-16-frames": SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": SCREAMING_SNAKE_CASE_: int = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": SCREAMING_SNAKE_CASE_: int = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": SCREAMING_SNAKE_CASE_: int = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": SCREAMING_SNAKE_CASE_: int = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": SCREAMING_SNAKE_CASE_: List[str] = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": SCREAMING_SNAKE_CASE_: List[str] = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": SCREAMING_SNAKE_CASE_: Tuple = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] ) 
# zero shot elif model_name == "xclip-base-patch16-zero-shot": SCREAMING_SNAKE_CASE_: int = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] ) else: raise ValueError(f"Model name {model_name} not supported" ) assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_UpperCAmelCase ) if push_to_hub: print("Pushing model, processor and slow tokenizer files to the hub..." ) model.push_to_hub(_UpperCAmelCase , organization="nielsr" ) processor.push_to_hub(_UpperCAmelCase , organization="nielsr" ) slow_tokenizer.push_to_hub(_UpperCAmelCase , organization="nielsr" ) if __name__ == "__main__": lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCAmelCase : Dict = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
13
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
26
0
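The X-CLIP conversion script above derives the vision patch size by reading the two characters that follow the literal "patch" in the checkpoint name. Below is a minimal standalone sketch of just that parsing step; parse_patch_size is a name introduced here for illustration, and the model names are taken from the script's own model_to_url table.

def parse_patch_size(model_name: str) -> int:
    # "xclip-base-patch32" -> 32, "xclip-large-patch14-16-frames" -> 14
    start_idx = model_name.find("patch")
    return int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])


for name in ["xclip-base-patch32", "xclip-base-patch16-16-frames", "xclip-large-patch14"]:
    print(name, "->", parse_patch_size(name))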
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCAmelCase__ = Features({'''text''': Value('''string''' )} ) UpperCAmelCase__ = Features({'''labels''': ClassLabel} ) UpperCAmelCase__ = "text" UpperCAmelCase__ = "labels" def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if self.label_column not in features: raise ValueError(f"""Column {self.label_column} is not present in features.""") if not isinstance(features[self.label_column] , UpperCAmelCase__): raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""") A__ = copy.deepcopy(self) A__ = self.label_schema.copy() A__ = features[self.label_column] A__ = label_schema return task_template @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict[str, str]: '''simple docstring''' return { self.text_column: "text", self.label_column: "labels", }
14
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
26
0
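The Donut image-processor test above asserts that an integer size becomes a square {"height", "width"} dict and that a (42, 84) tuple is read in legacy (width, height) order. The helper below is a simplified re-implementation of only that normalisation rule, written here for illustration; it is not taken from the library.

def normalize_size(size):
    # Dicts pass through; tuples/lists are treated as legacy (width, height);
    # a bare int means a square size. Mirrors what the assertions above expect.
    if isinstance(size, dict):
        return size
    if isinstance(size, (tuple, list)):
        width, height = size
        return {"height": height, "width": width}
    return {"height": size, "width": size}


print(normalize_size((42, 84)))  # {'height': 84, 'width': 42}
print(normalize_size(42))        # {'height': 42, 'width': 42}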
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve: yield primes one at a time, tracking composites in a map."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Find the least odd n for which 2 * prime_n * n exceeds limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
15
from __future__ import annotations

import numpy as np


def relu(vector):
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
26
0
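The prime-sieve solution in the row above skips every other prime and advances n by 2 because of the identity that, for the n-th prime p, (p - 1)^n + (p + 1)^n leaves remainder 2 modulo p^2 when n is even and 2*n*p modulo p^2 when n is odd. Below is a short self-contained check of that identity over the first ten primes; the list of primes is hard-coded for the check.

def remainder(p: int, n: int) -> int:
    return ((p - 1) ** n + (p + 1) ** n) % (p * p)


primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]  # 1st through 10th primes
for n, p in enumerate(primes, start=1):
    expected = 2 if n % 2 == 0 else (2 * n * p) % (p * p)
    assert remainder(p, n) == expected
print("remainder identity holds for the first 10 primes")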
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'], 'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'], 'processing_wav2vec2': ['Wav2Vec2Processor'], 'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWav2Vec2ForCTC', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel', 'TFWav2Vec2ForSequenceClassification', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
26
0
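When num_return_sequences > 1, the distributed eval script above regroups the flat list of decoded generations into one chunk per input example via a chunks helper imported from its utils module. The sketch below is an assumption about that helper's behaviour, not its actual implementation.

def chunks(seq, n):
    # Split seq into consecutive pieces of length n (the last piece may be shorter).
    return [seq[i : i + n] for i in range(0, len(seq), n)]


decoded = ["a1", "a2", "b1", "b2", "c1", "c2"]  # 3 inputs x 2 return sequences
print(chunks(decoded, 2))  # [['a1', 'a2'], ['b1', 'b2'], ['c1', 'c2']]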
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _a = 16 _a = 32 def _A ( UpperCamelCase_ : Accelerator, UpperCamelCase_ : int = 16, UpperCamelCase_ : str = "bert-base-cased") -> List[str]: '''simple docstring''' __lowercase = AutoTokenizer.from_pretrained(UpperCamelCase_) __lowercase = load_dataset("glue", "mrpc") def tokenize_function(UpperCamelCase_ : Optional[Any]): # max_length=None => use the model max length (it's actually the default) __lowercase = tokenizer(examples["sentence1"], examples["sentence2"], truncation=UpperCamelCase_, max_length=UpperCamelCase_) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __lowercase = datasets.map( UpperCamelCase_, batched=UpperCamelCase_, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=UpperCamelCase_) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowercase = tokenized_datasets.rename_column("label", "labels") def collate_fn(UpperCamelCase_ : Tuple): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase_, padding="max_length", max_length=128, return_tensors="pt") return tokenizer.pad(UpperCamelCase_, padding="longest", return_tensors="pt") # Instantiate dataloaders. 
__lowercase = DataLoader( tokenized_datasets["train"], shuffle=UpperCamelCase_, collate_fn=UpperCamelCase_, batch_size=UpperCamelCase_) __lowercase = DataLoader( tokenized_datasets["validation"], shuffle=UpperCamelCase_, collate_fn=UpperCamelCase_, batch_size=UpperCamelCase_) return train_dataloader, eval_dataloader def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Tuple) -> Tuple: '''simple docstring''' __lowercase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowercase = config["lr"] __lowercase = int(config["num_epochs"]) __lowercase = int(config["seed"]) __lowercase = int(config["batch_size"]) __lowercase = args.model_name_or_path set_seed(UpperCamelCase_) __lowercase ,__lowercase = get_dataloaders(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowercase = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase_, return_dict=UpperCamelCase_) # Instantiate optimizer __lowercase = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __lowercase = optimizer_cls(params=model.parameters(), lr=UpperCamelCase_) if accelerator.state.deepspeed_plugin is not None: __lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: __lowercase = 1 __lowercase = (len(UpperCamelCase_) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __lowercase = get_linear_schedule_with_warmup( optimizer=UpperCamelCase_, num_warmup_steps=0, num_training_steps=UpperCamelCase_, ) else: __lowercase = DummyScheduler(UpperCamelCase_, total_num_steps=UpperCamelCase_, warmup_num_steps=0) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase = accelerator.prepare( UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_) # We need to keep track of how many total steps we have iterated over __lowercase = 0 # We also need to keep track of the stating epoch so files are named properly __lowercase = 0 # Now we train the model __lowercase = evaluate.load("glue", "mrpc") __lowercase = 0 __lowercase = {} for epoch in range(UpperCamelCase_, UpperCamelCase_): model.train() for step, batch in enumerate(UpperCamelCase_): __lowercase = model(**UpperCamelCase_) __lowercase = outputs.loss __lowercase = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase_) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __lowercase = 0 for step, batch in enumerate(UpperCamelCase_): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): __lowercase = model(**UpperCamelCase_) __lowercase = outputs.logits.argmax(dim=-1) # It is slightly faster to call this once, than multiple times __lowercase ,__lowercase = accelerator.gather( (predictions, batch["labels"])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCamelCase_) - 1: __lowercase = predictions[: len(eval_dataloader.dataset) - samples_seen] __lowercase = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCamelCase_, references=UpperCamelCase_, ) __lowercase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""", UpperCamelCase_) __lowercase = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: __lowercase = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(UpperCamelCase_, UpperCamelCase_) def _A ( ) -> List[str]: '''simple docstring''' __lowercase = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") parser.add_argument( "--model_name_or_path", type=UpperCamelCase_, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=UpperCamelCase_, ) parser.add_argument( "--output_dir", type=UpperCamelCase_, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--performance_lower_bound", type=UpperCamelCase_, default=UpperCamelCase_, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", ) parser.add_argument( "--num_epochs", type=UpperCamelCase_, default=3, help="Number of train epochs.", ) __lowercase = parser.parse_args() __lowercase = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(UpperCamelCase_, UpperCamelCase_) if __name__ == "__main__": main()
17
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): @slow def a__ ( self ) -> Any: _A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) _A : List[Any] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _A : List[str] = model(_a )["""last_hidden_state"""] _A : Union[str, Any] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. _A : List[Any] = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
26
0
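The training loop in the row above divides each loss by gradient_accumulation_steps and only calls optimizer.step() once per accumulation window. Below is a minimal self-contained sketch of that pattern in plain PyTorch; the model, data, and step counts are invented for illustration, and the stepping condition is simplified relative to the script.

import torch
from torch import nn

torch.manual_seed(0)
model = nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
gradient_accumulation_steps = 4

batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]
for step, (x, y) in enumerate(batches):
    loss = nn.functional.mse_loss(model(x), y)
    # Scale so the accumulated gradient matches one large-batch update.
    (loss / gradient_accumulation_steps).backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()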
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class a__ ( A__ ): def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = parent SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size SCREAMING_SNAKE_CASE_ : Dict = seq_length SCREAMING_SNAKE_CASE_ : Dict = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : int = use_token_type_ids SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels SCREAMING_SNAKE_CASE_ : Tuple = vocab_size SCREAMING_SNAKE_CASE_ : Any = hidden_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_act SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : str = max_position_embeddings SCREAMING_SNAKE_CASE_ : str = type_vocab_size SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE_ : Tuple = num_labels SCREAMING_SNAKE_CASE_ : List[Any] = num_choices SCREAMING_SNAKE_CASE_ : Dict = scope SCREAMING_SNAKE_CASE_ : int = q_groups SCREAMING_SNAKE_CASE_ : Tuple = k_groups SCREAMING_SNAKE_CASE_ : List[Any] = v_groups SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups SCREAMING_SNAKE_CASE_ : int = intermediate_groups SCREAMING_SNAKE_CASE_ : List[Any] = output_groups def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) SCREAMING_SNAKE_CASE_ : List[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : Any = None SCREAMING_SNAKE_CASE_ : str = None if self.use_labels: SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices ) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self : str ): 
"""simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,) def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A ) model.to(_A ) model.eval() SCREAMING_SNAKE_CASE_ : Any = model(_A,_A ) SCREAMING_SNAKE_CASE_ : List[str] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A ) model.to(_A ) model.eval() SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() SCREAMING_SNAKE_CASE_ : Dict = model( _A,attention_mask=_A,start_positions=_A,end_positions=_A ) self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.num_labels SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A ) model.to(_A ) model.eval() SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A ) model.to(_A ) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() SCREAMING_SNAKE_CASE_ : 
Optional[int] = model( _A,attention_mask=_A,labels=_A,) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class a__ ( A__ , A__ , unittest.TestCase ): A = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) A = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) A = False A = True A = False def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self ) SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 ) def __UpperCamelCase ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_A ) def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A ) def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_A ) def __UpperCamelCase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A ) def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_A ) def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A ) @slow def __UpperCamelCase ( self : Any ): """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_sentencepiece @require_tokenizers @require_torch class a__ ( unittest.TestCase ): @slow def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" ) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] ) SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
torch.Size((1, 3) ) self.assertEqual(output.shape,_A ) SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
18
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
26
0
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging __A =logging.get_logger(__name__) # pylint: disable=invalid-name class _SCREAMING_SNAKE_CASE ( snake_case_ ): def __init__( self , lowercase , lowercase=768 ) -> Optional[int]: super().__init__(lowercase ) lowerCamelCase_ = proj_size lowerCamelCase_ = CLIPVisionModel(lowercase ) lowerCamelCase_ = PaintByExampleMapper(lowercase ) lowerCamelCase_ = nn.LayerNorm(config.hidden_size ) lowerCamelCase_ = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling lowerCamelCase_ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=False ) -> List[Any]: lowerCamelCase_ = self.model(pixel_values=lowercase ) lowerCamelCase_ = clip_output.pooler_output lowerCamelCase_ = self.mapper(latent_states[:, None] ) lowerCamelCase_ = self.final_layer_norm(lowercase ) lowerCamelCase_ = self.proj_out(lowercase ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase ) -> Any: super().__init__() lowerCamelCase_ = (config.num_hidden_layers + 1) // 5 lowerCamelCase_ = config.hidden_size lowerCamelCase_ = 1 lowerCamelCase_ = nn.ModuleList( [ BasicTransformerBlock(lowercase , lowercase , lowercase , activation_fn="gelu" , attention_bias=lowercase ) for _ in range(lowercase ) ] ) def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tuple: for block in self.blocks: lowerCamelCase_ = block(lowercase ) return hidden_states
19
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
26
0
def _snake_case( SCREAMING_SNAKE_CASE__ = 1_000_000 ) -> int: lowercase : Optional[Any] = 1 lowercase : List[Any] = 1 lowercase : Any = {1: 1} for inputa in range(2 , SCREAMING_SNAKE_CASE__ ): lowercase : Any = 0 lowercase : int = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowercase : Optional[Any] = (3 * number) + 1 counter += 1 if inputa not in counters: lowercase : Union[str, Any] = counter if counter > pre_counter: lowercase : List[Any] = inputa lowercase : str = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
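# A minimal renamed sketch of the memoised longest-Collatz-chain search implemented
# above (Project Euler style: longest 3n + 1 chain started below one million).
# The names below are illustrative only and not taken from the source row.
def longest_collatz_chain(limit: int = 1_000_000) -> int:
    chain_length = {1: 1}  # start value -> number of terms until reaching 1
    best_start, best_length = 1, 1
    for start in range(2, limit):
        n, steps = start, 0
        while n not in chain_length:
            n = n // 2 if n % 2 == 0 else 3 * n + 1
            steps += 1
        chain_length[start] = steps + chain_length[n]
        if chain_length[start] > best_length:
            best_start, best_length = start, chain_length[start]
    return best_start

print(longest_collatz_chain())  # 837799 for the default limit of 1_000_000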
20
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
26
0
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCamelCase_( lowerCamelCase_ ) -> Any: warnings.warn( 'The preprocess method is deprecated and will be removed in a future version. Please' ' use VaeImageProcessor.preprocess instead' , lowerCamelCase_ , ) if isinstance(lowerCamelCase_ , torch.Tensor ): return image elif isinstance(lowerCamelCase_ , PIL.Image.Image ): _lowercase : int = [image] if isinstance(image[0] , PIL.Image.Image ): _lowercase , _lowercase : Dict = image[0].size _lowercase , _lowercase : List[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 _lowercase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] _lowercase : Optional[Any] = np.concatenate(lowerCamelCase_ , axis=0 ) _lowercase : Optional[int] = np.array(lowerCamelCase_ ).astype(np.floataa ) / 2_55.0 _lowercase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) _lowercase : str = 2.0 * image - 1.0 _lowercase : Dict = torch.from_numpy(lowerCamelCase_ ) elif isinstance(image[0] , torch.Tensor ): _lowercase : List[Any] = torch.cat(lowerCamelCase_ , dim=0 ) return image def UpperCamelCase_( lowerCamelCase_ ) -> str: if isinstance(lowerCamelCase_ , torch.Tensor ): return mask elif isinstance(lowerCamelCase_ , PIL.Image.Image ): _lowercase : Dict = [mask] if isinstance(mask[0] , PIL.Image.Image ): _lowercase , _lowercase : Dict = mask[0].size _lowercase , _lowercase : Union[str, Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _lowercase : List[str] = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask] _lowercase : Any = np.concatenate(lowerCamelCase_ , axis=0 ) _lowercase : Any = mask.astype(np.floataa ) / 2_55.0 _lowercase : List[str] = 0 _lowercase : Dict = 1 _lowercase : int = torch.from_numpy(lowerCamelCase_ ) elif isinstance(mask[0] , torch.Tensor ): _lowercase : Tuple = torch.cat(lowerCamelCase_ , dim=0 ) return mask class _lowerCamelCase( _a ): lowercase_ : UNetaDModel lowercase_ : RePaintScheduler def __init__( self, lowerCamelCase, lowerCamelCase) -> List[str]: """simple docstring""" super().__init__() self.register_modules(unet=lowerCamelCase, scheduler=lowerCamelCase) @torch.no_grad() def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 2_50, lowerCamelCase = 0.0, lowerCamelCase = 10, lowerCamelCase = 10, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" _lowercase : Tuple = image _lowercase : List[str] = _preprocess_image(lowerCamelCase) _lowercase : List[Any] = original_image.to(device=self.device, dtype=self.unet.dtype) _lowercase : int = _preprocess_mask(lowerCamelCase) _lowercase : Dict = mask_image.to(device=self.device, dtype=self.unet.dtype) _lowercase : Optional[int] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(lowerCamelCase)}, but requested an effective batch''' F''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''') _lowercase : int = original_image.shape _lowercase : Dict = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(lowerCamelCase, lowerCamelCase, lowerCamelCase, self.device) _lowercase : Optional[Any] = eta _lowercase : Dict = self.scheduler.timesteps[0] + 1 _lowercase : Optional[Any] = generator[0] if isinstance(lowerCamelCase, lowerCamelCase) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): if t < t_last: # predict the noise residual _lowercase : int = self.unet(lowerCamelCase, lowerCamelCase).sample # compute previous image: x_t -> x_t-1 _lowercase : Optional[Any] = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase).prev_sample else: # compute the reverse: x_t-1 -> x_t _lowercase : int = self.scheduler.undo_step(lowerCamelCase, lowerCamelCase, lowerCamelCase) _lowercase : List[Any] = t _lowercase : Dict = (image / 2 + 0.5).clamp(0, 1) _lowercase : Optional[Any] = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": _lowercase : Tuple = self.numpy_to_pil(lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase)
21
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowercase ( UpperCamelCase__,UpperCamelCase__ ): _a = 1 @register_to_config def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]: _A : Dict = None _A : List[Any] = None _A : Dict = None def a__ ( self , _a , _a = None ) -> Union[str, Any]: _A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a ) def a__ ( self , _a , _a , _a , _a=None ) -> Dict: if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score _A : Any = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) _A : List[str] = std.flatten() while len(std.shape ) < len(score.shape ): _A : List[Any] = std.unsqueeze(-1 ) _A : int = -score / std # compute _A : Tuple = -1.0 / len(self.timesteps ) _A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _A : List[str] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): _A : Union[str, Any] = beta_t.unsqueeze(-1 ) _A : Tuple = -0.5 * beta_t * x _A : Tuple = torch.sqrt(_a ) _A : Dict = drift - diffusion**2 * score _A : Dict = x + drift * dt # add noise _A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype ) _A : str = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ) -> Optional[Any]: return self.config.num_train_timesteps
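# The step_pred logic above appears to perform one Euler-Maruyama step of the
# reverse-time VP SDE, dx = [-1/2 * beta(t) * x - beta(t) * score] dt + sqrt(beta(t)) dw_bar,
# with dt = -1 / num_train_timesteps and score = -model_output / std(t).
# The scalar, torch-free form below is an illustrative re-statement under that reading,
# not the scheduler's API; the defaults mirror the signature above.
import math

def reverse_vp_sde_euler_step(x, score, t, beta_min=0.1, beta_max=20.0, num_steps=2000, noise=0.0):
    dt = -1.0 / num_steps
    beta_t = beta_min + t * (beta_max - beta_min)  # linear beta schedule
    drift = -0.5 * beta_t * x - beta_t * score     # f(x, t) - g(t)^2 * score
    diffusion = math.sqrt(beta_t)                  # g(t)
    x_mean = x + drift * dt                        # mean of the update
    return x_mean + diffusion * math.sqrt(-dt) * noise, x_mean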
26
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE :Any = { '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig'''] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Union[str, Any] = ['''RemBertTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :List[str] = ['''RemBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :int = [ '''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RemBertForCausalLM''', '''RemBertForMaskedLM''', '''RemBertForMultipleChoice''', '''RemBertForQuestionAnswering''', '''RemBertForSequenceClassification''', '''RemBertForTokenClassification''', '''RemBertLayer''', '''RemBertModel''', '''RemBertPreTrainedModel''', '''load_tf_weights_in_rembert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Optional[Any] = [ '''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRemBertForCausalLM''', '''TFRemBertForMaskedLM''', '''TFRemBertForMultipleChoice''', '''TFRemBertForQuestionAnswering''', '''TFRemBertForSequenceClassification''', '''TFRemBertForTokenClassification''', '''TFRemBertLayer''', '''TFRemBertModel''', '''TFRemBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
26
0
'''simple docstring''' from typing import TYPE_CHECKING from ..utils import _LazyModule UpperCamelCase__: Optional[Any] = { "config": [ "EXTERNAL_DATA_FORMAT_SIZE_LIMIT", "OnnxConfig", "OnnxConfigWithPast", "OnnxSeq2SeqConfigWithPast", "PatchingSpec", ], "convert": ["export", "validate_model_outputs"], "features": ["FeaturesManager"], "utils": ["ParameterFormat", "compute_serialized_parameters_size"], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys UpperCamelCase__: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
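# The _LazyModule wiring above defers the actual submodule imports until first
# attribute access. A minimal sketch of that idea using a PEP 562 module-level
# __getattr__; this illustrates the pattern only and is not the real _LazyModule
# implementation (the attribute map below is a hand-picked subset of the structure above).
import importlib

_ATTR_TO_SUBMODULE = {
    "OnnxConfig": "config",
    "export": "convert",
    "FeaturesManager": "features",
}

def __getattr__(name):
    submodule = _ATTR_TO_SUBMODULE.get(name)
    if submodule is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{submodule}", __name__)
    return getattr(module, name)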
23
from math import asin, atan, cos, radians, sin, sqrt, tan _snake_case = 6_3_7_8_1_3_7.0 _snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5 _snake_case = 6378137 def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : Any = (AXIS_A - AXIS_B) / AXIS_A _A : Optional[int] = atan((1 - flattening) * tan(radians(snake_case_ ) ) ) _A : List[str] = atan((1 - flattening) * tan(radians(snake_case_ ) ) ) _A : Optional[Any] = radians(snake_case_ ) _A : str = radians(snake_case_ ) # Equation _A : Dict = sin((phi_a - phi_a) / 2 ) _A : List[str] = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda _A : Optional[int] = sqrt(sin_sq_phi + (cos(snake_case_ ) * cos(snake_case_ ) * sin_sq_lambda) ) return 2 * RADIUS * asin(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod()
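# The function above applies the haversine formula to reduced (parametric) latitudes,
# atan((1 - flattening) * tan(lat)), to partially correct for Earth's flattening.
# For comparison, a plain spherical haversine with illustrative names (not the source API):
from math import asin, cos, radians, sin, sqrt

def spherical_haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres on a sphere of equatorial radius."""
    radius = 6_378_137.0
    phi_a, phi_b = radians(lat1), radians(lat2)
    half_d_phi = radians(lat2 - lat1) / 2
    half_d_lambda = radians(lon2 - lon1) / 2
    h = sin(half_d_phi) ** 2 + cos(phi_a) * cos(phi_b) * sin(half_d_lambda) ** 2
    return 2 * radius * asin(sqrt(h))

# Sanity check: one degree of latitude is roughly 111 km.
print(round(spherical_haversine(0.0, 0.0, 1.0, 0.0) / 1000))  # ~111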
26
0
import math def lowerCamelCase__ ( snake_case_ : int ) -> int: if not isinstance(snake_case_ , snake_case_ ): __snake_case = f"""Input value of [number={number}] must be an integer""" raise TypeError(snake_case_ ) if number < 1: __snake_case = f"""Input value of [number={number}] must be > 0""" raise ValueError(snake_case_ ) elif number == 1: return 3 elif number == 2: return 5 else: __snake_case = int(math.log(number // 3 , 2 ) ) + 2 __snake_case = [3, 5] __snake_case = 2 __snake_case = 3 for block in range(1 , snake_case_ ): for _ in range(snake_case_ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): snake_case_ = 0 try: snake_case_ = proth(number) except ValueError: print(F'ValueError: there is no {number}th Proth number') continue print(F'The {number}th Proth number: {value}')
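# The generator above builds the n-th Proth number (values k * 2**n + 1 with odd
# k < 2**n) block by block. A brute-force cross-check of the first few values,
# written independently and purely for illustration:
def proth_numbers_up_to(limit: int) -> list[int]:
    found = set()
    n = 1
    while 2 ** n <= limit:
        for k in range(1, 2 ** n, 2):  # odd k strictly below 2**n
            value = k * 2 ** n + 1
            if value <= limit:
                found.add(value)
        n += 1
    return sorted(found)

print(proth_numbers_up_to(100))  # [3, 5, 9, 13, 17, 25, 33, 41, 49, 57, 65, 81, 97]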
24
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
26
0
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = IFPipeline __UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" return self._get_dummy_components() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __magic_name__ (self ) -> Tuple: """simple docstring""" self._test_save_load_local() def __magic_name__ (self ) -> Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ (self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components ) SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : int = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0] assert image.shape == (64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) 
).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Dict = output.images[0] assert image.shape == (2_56, 2_56, 3) SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 SCREAMING_SNAKE_CASE__ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
25
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
26
0
'''simple docstring''' import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ): __a : Dict = fname.split(os.path.sep )[-1] return re.search(r'^(.*)_\d+\.jpg$' , _SCREAMING_SNAKE_CASE ).groups()[0] class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=None , __a=None ): '''simple docstring''' __a : Any = file_names __a : List[str] = image_transform __a : List[str] = label_to_id def __len__( self ): '''simple docstring''' return len(self.file_names ) def __getitem__( self , __a ): '''simple docstring''' __a : Dict = self.file_names[idx] __a : Tuple = PIL.Image.open(__a ) __a : int = raw_image.convert('RGB' ) if self.image_transform is not None: __a : List[Any] = self.image_transform(__a ) __a : List[str] = extract_label(__a ) if self.label_to_id is not None: __a : List[Any] = self.label_to_id[label] return {"image": image, "label": label} def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] ): # Initialize accelerator if args.with_tracking: __a : Optional[int] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: __a : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a : Optional[int] = config['lr'] __a : Optional[Any] = int(config['num_epochs'] ) __a : Tuple = int(config['seed'] ) __a : List[str] = int(config['batch_size'] ) __a : Union[str, Any] = config['image_size'] if not isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ): __a : Optional[int] = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": __a : Optional[int] = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): __a : Dict = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: __a : Optional[Any] = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: __a : List[str] = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('.' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Grab all the image filenames __a : Optional[int] = [os.path.join(args.data_dir , _SCREAMING_SNAKE_CASE ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences __a : List[str] = [extract_label(_SCREAMING_SNAKE_CASE ) for fname in file_names] __a : Tuple = list(set(_SCREAMING_SNAKE_CASE ) ) id_to_label.sort() __a : Optional[Any] = {lbl: i for i, lbl in enumerate(_SCREAMING_SNAKE_CASE )} # Set the seed before splitting the data. 
np.random.seed(_SCREAMING_SNAKE_CASE ) torch.manual_seed(_SCREAMING_SNAKE_CASE ) torch.cuda.manual_seed_all(_SCREAMING_SNAKE_CASE ) # Split our filenames between train and validation __a : int = np.random.permutation(len(_SCREAMING_SNAKE_CASE ) ) __a : str = int(0.8 * len(_SCREAMING_SNAKE_CASE ) ) __a : List[Any] = random_perm[:cut] __a : int = random_perm[cut:] # For training we use a simple RandomResizedCrop __a : str = Compose([RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.5, 1.0) ), ToTensor()] ) __a : Dict = PetsDataset( [file_names[i] for i in train_split] , image_transform=_SCREAMING_SNAKE_CASE , label_to_id=_SCREAMING_SNAKE_CASE ) # For evaluation, we use a deterministic Resize __a : Optional[int] = Compose([Resize(_SCREAMING_SNAKE_CASE ), ToTensor()] ) __a : Tuple = PetsDataset([file_names[i] for i in eval_split] , image_transform=_SCREAMING_SNAKE_CASE , label_to_id=_SCREAMING_SNAKE_CASE ) # Instantiate dataloaders. __a : Dict = DataLoader(_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , num_workers=4 ) __a : Optional[Any] = DataLoader(_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a : List[Any] = create_model('resnet50d' , pretrained=_SCREAMING_SNAKE_CASE , num_classes=len(_SCREAMING_SNAKE_CASE ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __a : Optional[int] = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): __a : str = False for param in model.get_classifier().parameters(): __a : List[Any] = True # We normalize the batches of images to be a bit faster. __a : List[Any] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) __a : Dict = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer __a : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler __a : Union[str, Any] = OneCycleLR(optimizer=_SCREAMING_SNAKE_CASE , max_lr=_SCREAMING_SNAKE_CASE , epochs=_SCREAMING_SNAKE_CASE , steps_per_epoch=len(_SCREAMING_SNAKE_CASE ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__a , __a , __a , __a , __a : List[str] = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over __a : List[Any] = 0 # We also need to keep track of the starting epoch so files are named properly __a : Tuple = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) __a : List[Any] = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint __a : Any = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) __a : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` __a : Optional[int] = os.path.splitext(_SCREAMING_SNAKE_CASE )[0] if "epoch" in training_difference: __a : Optional[int] = int(training_difference.replace('epoch_' , '' ) ) + 1 __a : str = None else: __a : str = int(training_difference.replace('step_' , '' ) ) __a : Union[str, Any] = resume_step // len(_SCREAMING_SNAKE_CASE ) resume_step -= starting_epoch * len(_SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): model.train() if args.with_tracking: __a : Dict = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step __a : int = accelerator.skip_first_batches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader __a : Any = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. __a : int = {k: v.to(accelerator.device ) for k, v in batch.items()} __a : Tuple = (batch['image'] - mean) / std __a : List[str] = model(_SCREAMING_SNAKE_CASE ) __a : List[Any] = torch.nn.functional.cross_entropy(_SCREAMING_SNAKE_CASE , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(_SCREAMING_SNAKE_CASE ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[int] = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: __a : List[Any] = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE ) accelerator.save_state(_SCREAMING_SNAKE_CASE ) model.eval() __a : str = 0 __a : List[Any] = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. __a : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()} __a : Tuple = (batch['image'] - mean) / std with torch.no_grad(): __a : List[Any] = model(_SCREAMING_SNAKE_CASE ) __a : str = outputs.argmax(dim=-1 ) __a , __a : Dict = accelerator.gather_for_metrics((predictions, batch['label']) ) __a : Optional[int] = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() __a : int = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { 'accuracy': 100 * eval_metric, 'train_loss': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), 'epoch': epoch, } , step=_SCREAMING_SNAKE_CASE , ) if checkpointing_steps == "epoch": __a : str = F"""epoch_{epoch}""" if args.output_dir is not None: __a : Tuple = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE ) accelerator.save_state(_SCREAMING_SNAKE_CASE ) if args.with_tracking: accelerator.end_training() def lowerCamelCase (): __a : Optional[int] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=_SCREAMING_SNAKE_CASE , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=_SCREAMING_SNAKE_CASE , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) __a : Tuple = parser.parse_args() __a : int = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
27
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic-programming solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic-programming solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
26
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCamelCase : int = False @skip_mps class SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = StableDiffusionAttendAndExcitePipeline _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} ) _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS _SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def A ( cls : Union[str, Any] ): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) @classmethod def A ( cls : Union[str, Any] ): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , ) UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) UpperCamelCase = CLIPTextModel(UpperCamelCase__ ) UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def A ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=0 ): """simple docstring""" if str(UpperCamelCase__ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(UpperCamelCase__ ) else: UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) UpperCamelCase = UpperCamelCase = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def A ( self : int ): """simple docstring""" UpperCamelCase = 'cpu' UpperCamelCase = self.get_dummy_components() UpperCamelCase = 
self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ ) UpperCamelCase = pipe(**UpperCamelCase__ ).images UpperCamelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) UpperCamelCase = np.array( [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] ) UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase__ , 1E-3 ) def A ( self : Any ): """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def A ( self : int ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A ( self : Union[str, Any] ): """simple docstring""" self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def A ( self : Tuple ): """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def A ( self : Optional[int] ): """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def A ( self : List[Any] ): """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def A ( self : Optional[Any] ): """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @classmethod def A ( cls : Union[str, Any] ): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) @classmethod def A ( cls : Any ): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(UpperCamelCase__ ) def A ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Tuple ): """simple docstring""" UpperCamelCase = torch.manual_seed(5_1 ) UpperCamelCase = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa ) pipe.to('cuda' ) UpperCamelCase = 'a painting of an elephant with glasses' UpperCamelCase = [5, 7] UpperCamelCase = pipe( prompt=UpperCamelCase__ , token_indices=UpperCamelCase__ , guidance_scale=7.5 , generator=UpperCamelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
28
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
26
0
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class lowerCamelCase (_snake_case ): '''simple docstring''' def __UpperCAmelCase ( self , _UpperCamelCase ) -> float: return 0.0 def lowercase__ ( __snake_case : np.ndarray , __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) UpperCAmelCase_ : Dict = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase__ ( __snake_case : FilterType , __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : List[str] = 512 UpperCAmelCase_ : str = [1] + [0] * (size - 1) UpperCAmelCase_ : Optional[Any] = [filter_type.process(__snake_case ) for item in inputs] UpperCAmelCase_ : Dict = [0] * (samplerate - size) # zero-padding outputs += filler UpperCAmelCase_ : Optional[int] = np.abs(np.fft.fft(__snake_case ) ) UpperCAmelCase_ : List[str] = 20 * np.logaa(__snake_case ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('Frequency (Hz)' ) plt.xscale('log' ) # Display within reasonable bounds UpperCAmelCase_ : Union[str, Any] = get_bounds(__snake_case , __snake_case ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('Gain (dB)' ) plt.plot(__snake_case ) plt.show() def lowercase__ ( __snake_case : FilterType , __snake_case : int ): '''simple docstring''' UpperCAmelCase_ : int = 512 UpperCAmelCase_ : Tuple = [1] + [0] * (size - 1) UpperCAmelCase_ : Tuple = [filter_type.process(__snake_case ) for item in inputs] UpperCAmelCase_ : List[str] = [0] * (samplerate - size) # zero-padding outputs += filler UpperCAmelCase_ : Dict = np.angle(np.fft.fft(__snake_case ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('Frequency (Hz)' ) plt.xscale('log' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('Phase shift (Radians)' ) plt.plot(np.unwrap(__snake_case , -2 * pi ) ) plt.show()
29
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
26
0
import argparse import os import re __a = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __a = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( snake_case__: str , snake_case__: bool = False ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: lowercase_ = f.read() lowercase_ = content.split('''\n''' ) lowercase_ = [] lowercase_ = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase_ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def a ( snake_case__: bool = False ): '''simple docstring''' lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )] lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
30
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
26
0
'''simple docstring''' import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class lowerCamelCase_ (snake_case__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase: List[str] = FlaxAutoencoderKL @property def _A ( self : Union[str, Any] ): _UpperCAmelCase : Optional[int] = 4 _UpperCAmelCase : str = 3 _UpperCAmelCase : int = (32, 32) _UpperCAmelCase : List[str] = jax.random.PRNGKey(0 ) _UpperCAmelCase : Optional[Any] = jax.random.uniform(A , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def _A ( self : Tuple ): _UpperCAmelCase : Tuple = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } _UpperCAmelCase : Union[str, Any] = self.dummy_input return init_dict, inputs_dict
31
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, using the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
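A short illustrative check of the n & (n - 1) trick used above; the function name follows the cleaned-up version of the file and the test values are arbitrary.

# Quick sanity checks for the bit trick (illustrative only, not part of the original file).
for n in (1, 2, 4, 1024):
    assert is_power_of_two(n)
for n in (3, 5, 6, 100):
    assert not is_power_of_two(n)
# Edge case: 0 & -1 == 0, so this implementation also reports 0 as a power of two.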
26
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = ['''pixel_values'''] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : int = size if size is not None else {'shortest_edge': 2_2_4} a_ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ , param_name='crop_size' ) a_ : Optional[int] = do_resize a_ : Dict = size a_ : Union[str, Any] = resample a_ : Optional[int] = do_center_crop a_ : Optional[int] = crop_size a_ : Optional[int] = do_rescale a_ : List[str] = rescale_factor a_ : Optional[Any] = do_normalize a_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN a_ : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD a_ : Tuple = do_convert_rgb def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> np.ndarray: a_ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) a_ : Dict = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> np.ndarray: a_ : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[int, float] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[int]: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : int , ) -> PIL.Image.Image: a_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize a_ : Optional[int] = size if size is not None else self.size a_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='size' , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : int = resample if resample is not None else self.resample a_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : Tuple = crop_size if crop_size is not None else self.crop_size a_ : str = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Any = do_rescale if do_rescale is not None else self.do_rescale a_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize a_ : List[Any] = image_mean if image_mean is not None else self.image_mean a_ : int = image_std if image_std is not None else self.image_std a_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb a_ : List[str] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: a_ : Any = [convert_to_rgb(SCREAMING_SNAKE_CASE__ ) for image in images] # All transformations expect numpy arrays. a_ : Optional[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: a_ : Dict = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: a_ : str = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: a_ : int = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: a_ : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] a_ : List[Any] = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
32
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
26
0
"""simple docstring""" from __future__ import annotations import requests __A : Optional[Any] = set( '''approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports'''.split() ) def lowercase ( __snake_case : str , __snake_case : int = 1 , __snake_case : str = "new" , __snake_case : list | None = None ): lowercase_ : Tuple = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ): lowercase_ : Union[str, Any] = F'''Invalid search term: {invalid_search_terms}''' raise ValueError(__snake_case ) lowercase_ : Optional[Any] = requests.get( F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 4_2_9: raise requests.HTTPError lowercase_ : Optional[int] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )} lowercase_ : str = {} for id_ in range(__snake_case ): lowercase_ : Dict = { item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
33
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
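The reader/writer classes defined above back the high-level SQL helpers in the `datasets` library. A minimal usage sketch follows, assuming a local SQLite database and the public `Dataset.from_sql` / `Dataset.to_sql` entry points of recent `datasets` releases; the table and file names are made up.

# Minimal sketch (assumed API, hypothetical database and table names).
import sqlite3

from datasets import Dataset

con = sqlite3.connect("example.db")                    # hypothetical local database
ds = Dataset.from_sql("SELECT * FROM my_table", con)   # reading goes through the SQL reader above
ds.to_sql("my_table_copy", con, batch_size=1000)       # writing goes through the SQL writer above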
26
0
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
34
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class lowercase ( UpperCamelCase__ ): _a = "fnet" def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Any = vocab_size _A : str = max_position_embeddings _A : Optional[Any] = hidden_size _A : List[str] = num_hidden_layers _A : List[str] = intermediate_size _A : List[Any] = hidden_act _A : List[str] = hidden_dropout_prob _A : List[str] = initializer_range _A : List[Any] = type_vocab_size _A : List[Any] = layer_norm_eps _A : List[str] = use_tpu_fourier_optimizations _A : str = tpu_short_seq_length
26
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __snake_case( _lowerCAmelCase ) -> Optional[int]: snake_case__ : Tuple = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): snake_case__ : str = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): snake_case__ : Optional[int] = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 snake_case__ : int = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] snake_case__ : Any = key.replace(f"patch_embed{idx}" , f"patch_embeddings.{int(_lowerCAmelCase )-1}" ) if "norm" in key: snake_case__ : Union[str, Any] = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 snake_case__ : int = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] snake_case__ : str = key.replace(f"layer_norm{idx}" , f"layer_norm.{int(_lowerCAmelCase )-1}" ) if "layer_norm1" in key: snake_case__ : str = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: snake_case__ : Optional[int] = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 snake_case__ : Dict = key[key.find("""block""" ) + len("""block""" )] snake_case__ : Any = key.replace(f"block{idx}" , f"block.{int(_lowerCAmelCase )-1}" ) if "attn.q" in key: snake_case__ : str = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: snake_case__ : Optional[Any] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: snake_case__ : Tuple = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: snake_case__ : List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: snake_case__ : List[str] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: snake_case__ : Dict = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: snake_case__ : Union[str, Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) snake_case__ : Tuple = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 snake_case__ : Optional[int] = key[key.find("""linear_c""" ) + len("""linear_c""" )] snake_case__ : str = key.replace(f"linear_c{idx}" , f"linear_c.{int(_lowerCAmelCase )-1}" ) if "bot_conv" in key: snake_case__ : Dict = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: snake_case__ : Union[str, Any] = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: snake_case__ : Tuple = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: snake_case__ : Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: snake_case__ : List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: snake_case__ : Optional[int] = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: snake_case__ : Tuple = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): snake_case__ : Dict 
= key.replace("""module.last_layer_depth""" , """head.head""" ) snake_case__ : List[str] = value return new_state_dict def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) snake_case__ : Tuple = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" ) snake_case__ : List[Any] = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" ) # next, add keys and values (in that order) to the state dict snake_case__ : str = kv_weight[ : config.hidden_sizes[i], : ] snake_case__ : List[str] = kv_bias[: config.hidden_sizes[i]] snake_case__ : Union[str, Any] = kv_weight[ config.hidden_sizes[i] :, : ] snake_case__ : List[str] = kv_bias[config.hidden_sizes[i] :] def __snake_case( ) -> Optional[Any]: snake_case__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return image @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=None ) -> Optional[int]: snake_case__ : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) snake_case__ : Optional[Any] = GLPNImageProcessor() # prepare image snake_case__ : Optional[int] = prepare_img() snake_case__ : Optional[Any] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict snake_case__ : List[Any] = torch.load(_lowerCAmelCase , map_location=torch.device("""cpu""" ) ) # rename keys snake_case__ : str = rename_keys(_lowerCAmelCase ) # key and value matrices need special treatment read_in_k_v(_lowerCAmelCase , _lowerCAmelCase ) # create HuggingFace model and load state dict snake_case__ : int = GLPNForDepthEstimation(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) model.eval() # forward pass snake_case__ : int = model(_lowerCAmelCase ) snake_case__ : List[str] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: snake_case__ : Dict = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: snake_case__ : Optional[int] = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"Unknown model name: {model_name}" ) snake_case__ : List[str] = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) 
parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __a = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
35
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: 1, 1/2, 1/3, ..., 1/n."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
26
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( a): lowerCamelCase__ = ['pixel_values'] def __init__( self, __a = True, __a = None, __a = PILImageResampling.BICUBIC, __a = True, __a = True, __a = 1 / 255, __a = None, __a = True, __a = None, __a = None, **__a, ): '''simple docstring''' super().__init__(**__a) _lowerCAmelCase : Optional[int] = size if size is not None else {"height": 224, "width": 224} _lowerCAmelCase : Optional[Any] = get_size_dict(__a) _lowerCAmelCase : str = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase : Tuple = get_size_dict(__a, default_to_square=__a, param_name="crop_size") _lowerCAmelCase : Optional[int] = do_resize _lowerCAmelCase : Optional[int] = do_rescale _lowerCAmelCase : List[str] = do_normalize _lowerCAmelCase : int = do_center_crop _lowerCAmelCase : int = crop_size _lowerCAmelCase : List[Any] = size _lowerCAmelCase : Union[str, Any] = resample _lowerCAmelCase : Union[str, Any] = rescale_factor _lowerCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD def snake_case__ ( self, __a, __a, __a = PILImageResampling.BILINEAR, __a = None, **__a, ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = get_size_dict(__a) if "shortest_edge" in size: _lowerCAmelCase : Dict = get_resize_output_image_size(__a, size=size["shortest_edge"], default_to_square=__a) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _lowerCAmelCase : int = (size["height"], size["width"]) else: raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}") return resize(__a, size=__a, resample=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a = None, **__a, ): '''simple docstring''' _lowerCAmelCase : Tuple = get_size_dict(__a) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}") return center_crop(__a, size=(size["height"], size["width"]), data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a = None, **__a): '''simple docstring''' return rescale(__a, scale=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ): '''simple docstring''' return normalize(__a, mean=__a, std=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ): '''simple docstring''' _lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase : Union[str, Any] = get_size_dict(__a, param_name="crop_size", default_to_square=__a) _lowerCAmelCase : int = resample if resample is not None else self.resample _lowerCAmelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase : str = image_std if image_std is not None else self.image_std _lowerCAmelCase : str = size if size is not None else self.size _lowerCAmelCase : Union[str, Any] = get_size_dict(__a) if not is_batched(__a): _lowerCAmelCase : int = [images] if not valid_images(__a): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. _lowerCAmelCase : str = [to_numpy_array(__a) for image in images] if do_resize: _lowerCAmelCase : Union[str, Any] = [self.resize(image=__a, size=__a, resample=__a) for image in images] if do_center_crop: _lowerCAmelCase : Optional[int] = [self.center_crop(image=__a, size=__a) for image in images] if do_rescale: _lowerCAmelCase : Dict = [self.rescale(image=__a, scale=__a) for image in images] if do_normalize: _lowerCAmelCase : Optional[int] = [self.normalize(image=__a, mean=__a, std=__a) for image in images] _lowerCAmelCase : str = [to_channel_dimension_format(__a, __a) for image in images] _lowerCAmelCase : str = {"pixel_values": images} return BatchFeature(data=__a, tensor_type=__a)
36
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
26
0
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the missing carrier concentration from the other two using the
    mass action law: intrinsic_conc**2 = electron_conc * hole_conc.
    Pass 0 for the single unknown value.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
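# Worked example (added for illustration): passing 0 for the unknown intrinsic concentration,
# the mass action law gives intrinsic_conc = sqrt(25 * 100) = 50.
assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)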
37
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
26
0
from sklearn.metrics import recall_score import datasets UpperCAmelCase_ : Dict = ''' Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. ''' UpperCAmelCase_ : List[Any] = ''' Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {\'recall\': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {\'recall\': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric(\'recall\') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {\'recall\': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric(\'recall\') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'recall\': array([1., 0., 0.])} ''' UpperCAmelCase_ : Union[str, Any] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): def _A ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _A ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=1 , __lowerCamelCase : Union[str, Any]="binary" , __lowerCamelCase : Dict=None , __lowerCamelCase : Tuple="warn" , ): UpperCamelCase :Tuple = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
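# Worked example (added), mirroring Example 1 in the docstring above: with references
# [0, 0, 1, 1, 1] and predictions [0, 1, 0, 1, 1] there are 2 true positives and 1 false
# negative among the 3 positive labels, so recall = 2 / 3. Using the sklearn function that is
# already imported at the top of this file:
#
#     recall_score([0, 0, 1, 1, 1], [0, 1, 0, 1, 1])  # 0.6666666666666666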
38
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
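# Added note: np.maximum broadcasts, so the same relu works elementwise on multi-dimensional
# arrays, e.g. relu(np.array([[-2.0, 3.0], [4.0, -5.0]])) -> [[0., 3.], [4., 0.]].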
26
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
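# Added illustration: with the defaults above (embed_dim=64 and four stages from depths=[3, 4, 6, 5]),
# the channel dimension after the last stage is 64 * 2 ** (4 - 1) = 512, which is the value stored
# in `hidden_size`. A minimal sketch, assuming this class is used as transformers' NatConfig:
#
#     config = NatConfig()
#     print(config.hidden_size)   # 512
#     print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']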
39
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
26
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowercase = logging.get_logger(__name__) class _A ( _a ,_a ): """simple docstring""" UpperCAmelCase : Optional[Any] = """maskformer-swin""" UpperCAmelCase : Optional[int] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Any , __UpperCAmelCase : List[Any]=224 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : int=3 , __UpperCAmelCase : int=96 , __UpperCAmelCase : Any=[2, 2, 6, 2] , __UpperCAmelCase : Tuple=[3, 6, 12, 24] , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : Dict=4.0 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : Optional[Any]=0.0 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Any=False , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Dict=1e-5 , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : List[str] , ): super().__init__(**__UpperCAmelCase) a : int = image_size a : str = patch_size a : Optional[int] = num_channels a : str = embed_dim a : int = depths a : Dict = len(__UpperCAmelCase) a : Dict = num_heads a : Union[str, Any] = window_size a : Optional[Any] = mlp_ratio a : Any = qkv_bias a : str = hidden_dropout_prob a : List[str] = attention_probs_dropout_prob a : Optional[int] = drop_path_rate a : List[str] = hidden_act a : int = use_absolute_embeddings a : int = layer_norm_eps a : List[str] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a : Dict = int(embed_dim * 2 ** (len(__UpperCAmelCase) - 1)) a : List[Any] = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(__UpperCAmelCase) + 1)] a , a : int = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names)
40
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
26
0