Dataset schema (each record below carries five fields):
  code                     string  (87 to 55.2k characters)
  code_codestyle           int64   (range 0 to 349)
  style_context            string  (135 to 49.1k characters)
  style_context_codestyle  int64   (range 0 to 349)
  label                    int64   (0 or 1)
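Each record pairs a `code` string with a `style_context` string, the integer code-style id of each, and a binary `label`. As a minimal sketch of how records with this schema could be read with the Hugging Face `datasets` library (the dataset identifier "org/code-style-pairs" is a placeholder, not the real path):

from datasets import load_dataset

# Placeholder dataset path -- substitute the real identifier for this dump.
ds = load_dataset("org/code-style-pairs", split="train")

for row in ds:
    # Each record carries two code strings, their style ids, and the pair label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])  # preview the first 80 characters of the code cell
    break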
code:
from __future__ import annotations


def lowerCAmelCase_ ( snake_case_ ):
    create_state_space_tree(snake_case_,[],0,[0 for i in range(len(snake_case_ ) )] )


def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,):
    if index == len(snake_case_ ):
        print(snake_case_ )
        return
    for i in range(len(snake_case_ ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            _A : List[Any] = True
            create_state_space_tree(snake_case_,snake_case_,index + 1,snake_case_ )
            current_sequence.pop()
            _A : Union[str, Any] = False


_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)

_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
code_codestyle: 26
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
style_context_codestyle: 26
label: 1
code:
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

_snake_case = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
_snake_case = typing.Union[np.floataa, int, float]  # noqa: UP007


def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )


def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    return sum((va - va) ** 2 for va, va in zip(snake_case_,snake_case_ ) ) ** (1 / 2)


if __name__ == "__main__":

    def lowerCAmelCase_ ( ):
        from timeit import timeit

        print("""Without Numpy""" )
        print(
            timeit(
                """euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""",number=10000,globals=globals(),) )
        print("""With Numpy""" )
        print(
            timeit(
                """euclidean_distance([1, 2, 3], [4, 5, 6])""",number=10000,globals=globals(),) )

    benchmark()
code_codestyle: 26
style_context:
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    @slow
    def a__ ( self ) -> Any:
        _A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        _A : List[Any] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !"
        _A : List[str] = model(_a )["""last_hidden_state"""]
        _A : Union[str, Any] = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , _a )
        # compare the actual values for a slice.
        _A : List[Any] = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
style_context_codestyle: 26
label: 1
code:
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging

logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)

_snake_case = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

_snake_case = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def lowerCAmelCase_ ( snake_case_ ):
    _A : int = torch.load(snake_case_,map_location="""cpu""" )
    return sd


def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=rename_keys_prefix ):
    _A : Dict = OrderedDict()
    _A : Tuple = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        _A : Dict = key
        for name_pair in rename_keys_prefix:
            _A : Tuple = new_key.replace(name_pair[0],name_pair[1] )
        _A : Dict = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            _A : Optional[int] = new_d["""cls.predictions.bias"""]
    return new_d


@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    assert (
        checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''

    # Get Config
    if "pre" in checkpoint_path:
        _A : str = """pretraining"""
        if "vcr" in checkpoint_path:
            _A : int = {"""visual_embedding_dim""": 512}
        elif "vqa_advanced" in checkpoint_path:
            _A : Any = {"""visual_embedding_dim""": 2048}
        elif "vqa" in checkpoint_path:
            _A : int = {"""visual_embedding_dim""": 2048}
        elif "nlvr" in checkpoint_path:
            _A : Optional[int] = {"""visual_embedding_dim""": 1024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            _A : Optional[Any] = {"""visual_embedding_dim""": 512}
            _A : List[Any] = """multichoice"""
        elif "vqa_advanced" in checkpoint_path:
            _A : List[Any] = {"""visual_embedding_dim""": 2048}
            _A : str = """vqa_advanced"""
        elif "vqa" in checkpoint_path:
            _A : Optional[Any] = {"""visual_embedding_dim""": 2048, """num_labels""": 3129}
            _A : Any = """vqa"""
        elif "nlvr" in checkpoint_path:
            _A : int = {
                """visual_embedding_dim""": 1024,
                """num_labels""": 2,
            }
            _A : List[Any] = """nlvr"""

    _A : Tuple = VisualBertConfig(**snake_case_ )

    # Load State Dict
    _A : Any = load_state_dict(snake_case_ )
    _A : List[Any] = get_new_dict(snake_case_,snake_case_ )

    if model_type == "pretraining":
        _A : Tuple = VisualBertForPreTraining(snake_case_ )
    elif model_type == "vqa":
        _A : Tuple = VisualBertForQuestionAnswering(snake_case_ )
    elif model_type == "nlvr":
        _A : Union[str, Any] = VisualBertForVisualReasoning(snake_case_ )
    elif model_type == "multichoice":
        _A : List[Any] = VisualBertForMultipleChoice(snake_case_ )

    model.load_state_dict(snake_case_ )
    # Save Checkpoints
    Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
    model.save_pretrained(snake_case_ )


if __name__ == "__main__":
    _snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    _snake_case = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 26
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
style_context_codestyle: 26
label: 1
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch _snake_case = logging.get_logger(__name__) class lowercase ( UpperCamelCase__ ): _a = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = True , **_a , ) -> None: super().__init__(**_a ) _A : Optional[Any] = size if size is not None else {"""shortest_edge""": 224} _A : int = get_size_dict(_a , default_to_square=_a ) _A : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 256, """width""": 256} _A : str = get_size_dict(_a , param_name="""crop_size""" ) _A : List[Any] = do_resize _A : Dict = size _A : Union[str, Any] = resample _A : int = do_rescale _A : Any = rescale_factor _A : Union[str, Any] = do_center_crop _A : str = crop_size _A : Optional[int] = do_flip_channel_order def a__ ( self , _a , _a , _a = PIL.Image.BILINEAR , _a = None , **_a , ) -> np.ndarray: _A : List[Any] = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) _A : Optional[Any] = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def a__ ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: _A : Union[str, Any] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def a__ ( self , _a , _a , _a = None , **_a , ) -> List[Any]: return rescale(_a , scale=_a , data_format=_a , **_a ) def a__ ( self , _a , _a = None ) -> np.ndarray: return flip_channel_order(_a , data_format=_a ) def a__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _A : int = do_resize if do_resize is not None else self.do_resize _A : List[str] = resample if resample is not None else self.resample _A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _A : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _A : Any = do_center_crop if do_center_crop is not None else self.do_center_crop _A : List[Any] = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) _A : Union[str, Any] = size if size is not None else self.size _A : List[str] = get_size_dict(_a , default_to_square=_a ) _A : Any = crop_size if crop_size is not None else self.crop_size _A : Dict = get_size_dict(_a , param_name="""crop_size""" ) _A : int = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) # All transformations expect numpy arrays. _A : List[Any] = [to_numpy_array(_a ) for image in images] if do_resize: _A : Union[str, Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: _A : str = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: _A : List[Any] = [self.rescale(image=_a , scale=_a ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: _A : Optional[Any] = [self.flip_channel_order(image=_a ) for image in images] _A : List[str] = [to_channel_dimension_format(_a , _a ) for image in images] _A : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a ) def a__ ( self , _a , _a = None ) -> str: _A : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_a ): _A : Optional[int] = target_sizes.numpy() _A : Optional[Any] = [] for idx in range(len(_a ) ): _A : Any = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a ) _A : str = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: _A : List[str] = logits.argmax(dim=1 ) _A : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
code_codestyle: 26
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
style_context_codestyle: 26
label: 1
code:
from collections.abc import Iterable
from typing import Any


class lowercase :
    def __init__( self , _a = None ) -> Dict:
        _A : Any = value
        _A : Node | None = None  # Added in order to delete a node easier
        _A : Node | None = None
        _A : Node | None = None

    def __repr__( self ) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )


class lowercase :
    def __init__( self , _a = None ) -> Optional[Any]:
        _A : Dict = root

    def __str__( self ) -> str:
        return str(self.root )

    def a__ ( self , _a , _a ) -> None:
        if new_children is not None:  # reset its kids
            _A : Tuple = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(_a ):  # If it is the right children
                _A : Dict = new_children
            else:
                _A : int = new_children
        else:
            _A : List[str] = new_children

    def a__ ( self , _a ) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def a__ ( self ) -> bool:
        return self.root is None

    def a__ ( self , _a ) -> None:
        _A : List[Any] = Node(_a )  # create a new Node
        if self.empty():  # if Tree is empty
            _A : Union[str, Any] = new_node  # set its root
        else:  # Tree is not empty
            _A : int = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        _A : List[Any] = new_node  # We insert the new node in a leaf
                        break
                    else:
                        _A : Union[str, Any] = parent_node.left
                else:
                    if parent_node.right is None:
                        _A : List[Any] = new_node
                        break
                    else:
                        _A : int = parent_node.right
            _A : Optional[int] = parent_node

    def a__ ( self , *_a ) -> None:
        for value in values:
            self.__insert(_a )

    def a__ ( self , _a ) -> Node | None:
        if self.empty():
            raise IndexError("""Warning: Tree is empty! please use another.""" )
        else:
            _A : Union[str, Any] = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                _A : int = node.left if value < node.value else node.right
            return node

    def a__ ( self , _a = None ) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            _A : Union[str, Any] = self.root
        if not self.empty():
            while node.right is not None:
                _A : str = node.right
        return node

    def a__ ( self , _a = None ) -> Node | None:
        if node is None:
            _A : Dict = self.root
        if self.root is None:
            return None
        if not self.empty():
            _A : str = self.root
            while node.left is not None:
                _A : int = node.left
        return node

    def a__ ( self , _a ) -> None:
        _A : Tuple = self.search(_a )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(_a , _a )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(_a , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(_a , node.left )
            else:
                _A : Tuple = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                _A : Union[str, Any] = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def a__ ( self , _a ) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )

    def a__ ( self , _a=None ) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )

    def a__ ( self , _a , _a ) -> None:
        if node:
            self.inorder(_a , node.left )
            arr.append(node.value )
            self.inorder(_a , node.right )

    def a__ ( self , _a , _a ) -> int:
        _A : list[int] = []
        self.inorder(_a , _a )  # append all values to list using inorder traversal
        return arr[k - 1]


def lowerCAmelCase_ ( snake_case_ ):
    _A : List[Any] = []
    if curr_node is not None:
        _A : Union[str, Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list


def lowerCAmelCase_ ( ):
    _A : int = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    _A : List[Any] = BinarySearchTree()
    for i in testlist:
        t.insert(snake_case_ )

    # Prints all the elements of the list in order traversal
    print(snake_case_ )

    if t.search(6 ) is not None:
        print("""The value 6 exists""" )
    else:
        print("""The value 6 doesn't exist""" )

    if t.search(-1 ) is not None:
        print("""The value -1 exists""" )
    else:
        print("""The value -1 doesn't exist""" )

    if not t.empty():
        print("""Max Value: """,t.get_max().value )  # type: ignore
        print("""Min Value: """,t.get_min().value )  # type: ignore

    for i in testlist:
        t.remove(snake_case_ )
    print(snake_case_ )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
code_codestyle: 26
style_context:
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class lowercase ( UpperCamelCase__ ):
    _a = (DPMSolverSDEScheduler,)
    _a = 1_0

    def a__ ( self , **_a ) -> Optional[Any]:
        _A : str = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**_a )
        return config

    def a__ ( self ) -> Tuple:
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_a )

    def a__ ( self ) -> Optional[int]:
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=_a , beta_end=_a )

    def a__ ( self ) -> Any:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=_a )

    def a__ ( self ) -> Optional[int]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_a )

    def a__ ( self ) -> Optional[int]:
        _A : Any = self.scheduler_classes[0]
        _A : List[str] = self.get_scheduler_config()
        _A : Optional[Any] = scheduler_class(**_a )

        scheduler.set_timesteps(self.num_inference_steps )

        _A : Dict = self.dummy_model()
        _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        _A : Dict = sample.to(_a )

        for i, t in enumerate(scheduler.timesteps ):
            _A : Optional[int] = scheduler.scale_model_input(_a , _a )
            _A : str = model(_a , _a )
            _A : List[Any] = scheduler.step(_a , _a , _a )
            _A : Optional[int] = output.prev_sample

        _A : Dict = torch.sum(torch.abs(_a ) )
        _A : Dict = torch.mean(torch.abs(_a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def a__ ( self ) -> Optional[Any]:
        _A : Dict = self.scheduler_classes[0]
        _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
        _A : Optional[Any] = scheduler_class(**_a )

        scheduler.set_timesteps(self.num_inference_steps )

        _A : Tuple = self.dummy_model()
        _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        _A : Tuple = sample.to(_a )

        for i, t in enumerate(scheduler.timesteps ):
            _A : int = scheduler.scale_model_input(_a , _a )
            _A : Tuple = model(_a , _a )
            _A : Dict = scheduler.step(_a , _a , _a )
            _A : Optional[int] = output.prev_sample

        _A : Optional[Any] = torch.sum(torch.abs(_a ) )
        _A : List[Any] = torch.mean(torch.abs(_a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3

    def a__ ( self ) -> List[str]:
        _A : Union[str, Any] = self.scheduler_classes[0]
        _A : List[Any] = self.get_scheduler_config()
        _A : List[str] = scheduler_class(**_a )

        scheduler.set_timesteps(self.num_inference_steps , device=_a )

        _A : Union[str, Any] = self.dummy_model()
        _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            _A : int = scheduler.scale_model_input(_a , _a )
            _A : List[Any] = model(_a , _a )
            _A : Dict = scheduler.step(_a , _a , _a )
            _A : Dict = output.prev_sample

        _A : str = torch.sum(torch.abs(_a ) )
        _A : str = torch.mean(torch.abs(_a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def a__ ( self ) -> Union[str, Any]:
        _A : List[Any] = self.scheduler_classes[0]
        _A : Optional[Any] = self.get_scheduler_config()
        _A : int = scheduler_class(**_a , use_karras_sigmas=_a )

        scheduler.set_timesteps(self.num_inference_steps , device=_a )

        _A : Optional[Any] = self.dummy_model()
        _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
        _A : str = sample.to(_a )

        for t in scheduler.timesteps:
            _A : Optional[int] = scheduler.scale_model_input(_a , _a )
            _A : List[Any] = model(_a , _a )
            _A : Dict = scheduler.step(_a , _a , _a )
            _A : List[str] = output.prev_sample

        _A : str = torch.sum(torch.abs(_a ) )
        _A : List[str] = torch.mean(torch.abs(_a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
style_context_codestyle: 26
label: 1
code:
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class lowercase ( UpperCamelCase__ ):
    _a = "openai/whisper-base"
    _a = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    _a = "transcriber"
    _a = WhisperProcessor
    _a = WhisperForConditionalGeneration
    _a = ["audio"]
    _a = ["text"]

    def a__ ( self , _a ) -> Dict:
        return self.pre_processor(_a , return_tensors="""pt""" ).input_features

    def a__ ( self , _a ) -> str:
        return self.model.generate(inputs=_a )

    def a__ ( self , _a ) -> Tuple:
        return self.pre_processor.batch_decode(_a , skip_special_tokens=_a )[0]
code_codestyle: 26
style_context:
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
    _a = 1

    @register_to_config
    def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]:
        _A : Dict = None
        _A : List[Any] = None
        _A : Dict = None

    def a__ ( self , _a , _a = None ) -> Union[str, Any]:
        _A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )

    def a__ ( self , _a , _a , _a , _a=None ) -> Dict:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        _A : Any = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        _A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        _A : List[str] = std.flatten()
        while len(std.shape ) < len(score.shape ):
            _A : List[Any] = std.unsqueeze(-1 )
        _A : int = -score / std

        # compute
        _A : Tuple = -1.0 / len(self.timesteps )

        _A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        _A : List[str] = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            _A : Union[str, Any] = beta_t.unsqueeze(-1 )
        _A : Tuple = -0.5 * beta_t * x

        _A : Tuple = torch.sqrt(_a )
        _A : Dict = drift - diffusion**2 * score
        _A : Dict = x + drift * dt

        # add noise
        _A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
        _A : str = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ) -> Optional[Any]:
        return self.config.num_train_timesteps
style_context_codestyle: 26
label: 1
code:
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("""socket.socket""" )
@patch("""builtins.open""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    # ===== initialization =====
    _A : List[str] = Mock()
    _A : Optional[int] = conn, Mock()
    _A : Union[str, Any] = iter([1, None] )
    _A : List[str] = lambda snake_case_ : next(snake_case_ )

    # ===== invoke =====
    send_file(filename="""mytext.txt""",testing=snake_case_ )

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
code_codestyle: 26
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
style_context_codestyle: 26
label: 1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"], "tokenization_roberta": ["RobertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["RobertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaForCausalLM", "RobertaForMaskedLM", "RobertaForMultipleChoice", "RobertaForQuestionAnswering", "RobertaForSequenceClassification", "RobertaForTokenClassification", "RobertaModel", "RobertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaForCausalLM", "TFRobertaForMaskedLM", "TFRobertaForMultipleChoice", "TFRobertaForQuestionAnswering", "TFRobertaForSequenceClassification", "TFRobertaForTokenClassification", "TFRobertaMainLayer", "TFRobertaModel", "TFRobertaPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "FlaxRobertaForCausalLM", "FlaxRobertaForMaskedLM", "FlaxRobertaForMultipleChoice", "FlaxRobertaForQuestionAnswering", "FlaxRobertaForSequenceClassification", "FlaxRobertaForTokenClassification", "FlaxRobertaModel", "FlaxRobertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Reduce the geodetic latitudes onto the auxiliary sphere, then apply the
    # classic haversine formula; the result is the distance in meters.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
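# Quick sanity check for haversine_distance, using approximate coordinates for
# San Francisco and Yosemite (illustrative values): the result should come out
# on the order of a couple of hundred kilometers.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"San Francisco -> Yosemite: {haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")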
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
from manim import * class lowercase ( UpperCamelCase__ ): def a__ ( self ) -> Dict: _A : List[Any] = Rectangle(height=0.5 , width=0.5 ) _A : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A : Optional[Any] = [mem.copy() for i in range(6 )] _A : List[Any] = [mem.copy() for i in range(6 )] _A : Optional[Any] = VGroup(*_a ).arrange(_a , buff=0 ) _A : Any = VGroup(*_a ).arrange(_a , buff=0 ) _A : Optional[int] = VGroup(_a , _a ).arrange(_a , buff=0 ) _A : Optional[Any] = Text("""CPU""" , font_size=24 ) _A : List[str] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_a ) _A : Optional[Any] = [mem.copy() for i in range(4 )] _A : Optional[int] = VGroup(*_a ).arrange(_a , buff=0 ) _A : Optional[Any] = Text("""GPU""" , font_size=24 ) _A : Union[str, Any] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) gpu.move_to([-1, -1, 0] ) self.add(_a ) _A : str = [mem.copy() for i in range(6 )] _A : int = VGroup(*_a ).arrange(_a , buff=0 ) _A : str = Text("""Model""" , font_size=24 ) _A : List[Any] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) model.move_to([3, -1.0, 0] ) self.add(_a ) _A : List[str] = [] for i, rect in enumerate(_a ): rect.set_stroke(_a ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) _A : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=_a , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=_a , buff=0.0 ) self.add(_a ) cpu_targs.append(_a ) _A : Union[str, Any] = [mem.copy() for i in range(6 )] _A : str = VGroup(*_a ).arrange(_a , buff=0 ) _A : List[str] = Text("""Loaded Checkpoint""" , font_size=24 ) _A : Optional[int] = Group(_a , _a ).arrange(_a , aligned_edge=_a , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) _A : List[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A : int = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_a , _a ) _A : List[str] = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) _A : Optional[int] = MarkupText( F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_a ) , Write(_a ) ) self.play(Write(_a , run_time=1 ) , Create(_a , run_time=1 ) ) _A : Optional[int] = [] _A : Dict = [] for i, rect in enumerate(_a ): _A : int = fill.copy().set_fill(_a , opacity=0.7 ) target.move_to(_a ) first_animations.append(GrowFromCenter(_a , run_time=1 ) ) _A : Optional[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(_a , run_time=1.5 ) ) self.play(*_a ) self.play(*_a ) self.wait()
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def naive_cut_rod_recursive(n, prices):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n, prices):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n, prices, max_rev):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n, prices):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n, prices):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
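# Worked example for the implementations above, with the classic CLRS prices
# [1, 5, 8, 9] for rod lengths 1..4: the optimal revenue for a rod of length 4
# is 10 (two pieces of length 2). The naive recursion is O(2^n); both
# memoized variants run in O(n^2).
clrs_prices = [1, 5, 8, 9]
assert naive_cut_rod_recursive(4, clrs_prices) == 10
assert top_down_cut_rod(4, clrs_prices) == 10
assert bottom_up_cut_rod(4, clrs_prices) == 10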
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    # print under an exclusive file lock so multi-process output does not interleave
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
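# The hard-coded CSS class above is tied to Yahoo's current markup and breaks on
# redesigns; a slightly more defensive variant (hypothetical helper, same idea)
# sends a browser-like User-Agent, sets a timeout, and guards against a missing tag.
import requests
from bs4 import BeautifulSoup


def stock_price_safe(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
    soup = BeautifulSoup(response.text, "html.parser")
    tag = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    return tag.find("span").text if tag else "N/A"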
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    # Count hollow square laminae that use at most `limit` tiles: for each outer
    # width, find the smallest legal hole width (same parity, at least 1, large
    # enough that outer**2 - hole**2 <= limit) and count the valid hole widths.
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
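# A brute-force cross-check for small limits (assumption: a lamina is a hollow
# square with outer width w and hole width h, w - h even, h >= 1, using
# w*w - h*h <= limit tiles); it should agree with the closed-form loop above.
def brute_force_laminae(limit: int) -> int:
    count = 0
    for w in range(3, limit // 4 + 2):
        for h in range(w - 2, 0, -2):  # thinnest lamina first; tile count grows as h shrinks
            if w * w - h * h > limit:
                break
            count += 1
    return count


assert brute_force_laminae(1000) == solution(1000)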
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model"} _snake_case = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , _a = None , **_a , ) -> None: _A : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token _A : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) _A : List[str] = 3 _A : List[str] = do_lower_case _A : Dict = remove_space _A : Optional[int] = keep_accents _A : Union[str, Any] = vocab_file _A : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """ """See https://pypi.org/project/jieba/ for installation.""" ) _A : Optional[int] = jieba _A : Union[str, Any] = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def a__ ( self ) -> List[Any]: return len(self.sp_model ) def a__ ( self ) -> Tuple: _A : Optional[Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: _A : List[Any] = self.__dict__.copy() _A : Optional[int] = None return state def __setstate__( self , _a ) -> Union[str, Any]: _A : Dict = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _A : Optional[int] = {} _A : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a__ ( self , _a ) -> str: if self.remove_space: _A : Dict = """ """.join(inputs.strip().split() ) else: _A : List[str] = inputs _A : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _A : List[str] = unicodedata.normalize("""NFKD""" , _a ) _A : Dict = """""".join([c for c in outputs if not unicodedata.combining(_a )] ) if self.do_lower_case: _A : Union[str, Any] = outputs.lower() return outputs def a__ ( self , _a ) -> List[str]: _A : List[str] = self.preprocess_text(_a ) _A : Tuple = self.sp_model.encode(_a , out_type=_a ) _A : List[str] = [] for piece in pieces: if len(_a ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _A : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _A : int = cur_pieces[1:] else: _A : str = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_a ) else: new_pieces.append(_a ) return new_pieces def a__ ( self , _a ) -> Union[str, Any]: return self.sp_model.PieceToId(_a ) def a__ ( self , _a ) -> Optional[Any]: return 
self.sp_model.IdToPiece(_a ) def a__ ( self , _a ) -> int: _A : Tuple = """""".join(_a ).replace(_a , """ """ ).strip() return out_string def a__ ( self , _a , _a = None ) -> List[int]: _A : Dict = [self.sep_token_id] _A : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def a__ ( self , _a , _a = None , _a = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is not None: return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1] return ([0] * len(_a )) + [1, 1] def a__ ( self , _a , _a = None ) -> List[int]: _A : Union[str, Any] = [self.sep_token_id] _A : Any = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : Optional[int] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , """wb""" ) as fi: _A : List[Any] = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def a__ ( self , *_a , **_a ) -> Optional[Any]: _A : Optional[Any] = super()._decode(*_a , **_a ) _A : Optional[int] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
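# gcd composes directly into an lcm; a small sketch built on the iterative version above:
def euclidean_lcm(a: int, b: int) -> int:
    return 0 if a == 0 or b == 0 else abs(a * b) // euclidean_gcd(a, b)


assert euclidean_lcm(4, 6) == 12
assert euclidean_lcm(21, 6) == 42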
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "vocab.txt"} _snake_case = { "vocab_file": { "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt", }, } _snake_case = { "openbmb/cpm-ant-10b": 1024, } def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = collections.OrderedDict() with open(snake_case_,"""r""",encoding="""utf-8""" ) as reader: _A : str = reader.readlines() for index, token in enumerate(snake_case_ ): _A : Any = token.rstrip("""\n""" ) _A : List[str] = index return vocab class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a="<unk>" , _a=200 ) -> str: _A : Optional[Any] = vocab _A : str = unk_token _A : Optional[int] = max_input_chars_per_word def a__ ( self , _a ) -> int: _A : Optional[int] = list(_a ) if len(_a ) > self.max_input_chars_per_word: return [self.unk_token] _A : Union[str, Any] = 0 _A : List[str] = [] while start < len(_a ): _A : int = len(_a ) _A : Any = None while start < end: _A : Optional[int] = """""".join(chars[start:end] ) if substr in self.vocab: _A : Dict = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_a ) _A : Any = end return sub_tokens class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "attention_mask"] _a = False def __init__( self , _a , _a="<d>" , _a="</d>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="<unk>" , _a="</n>" , _a="</_>" , _a="left" , **_a , ) -> Tuple: requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=_a , eod_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , unk_token=_a , line_token=_a , space_token=_a , padding_side=_a , **_a , ) _A : List[Any] = bod_token _A : List[str] = eod_token _A : List[str] = load_vocab(_a ) _A : List[str] = self.encoder[space_token] _A : Tuple = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] _A : Optional[int] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _a : x[1] ) ) _A : str = {v: k for k, v in self.encoder.items()} _A : List[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def a__ ( self ) -> str: return self.encoder[self.bod_token] @property def a__ ( self ) -> int: return self.encoder[self.eod_token] @property def a__ ( self ) -> Optional[Any]: return self.encoder["\n"] @property def a__ ( self ) -> int: return len(self.encoder ) def a__ ( self ) -> List[Any]: return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self , _a ) -> Dict: _A : Optional[int] = [] for x in jieba.cut(_a , cut_all=_a ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_a ) ) return output_tokens def a__ ( self , _a , **_a ) -> str: _A : List[str] = [i for i in token_ids if i >= 0] _A : Dict = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_a , **_a ) def a__ ( self , _a ) -> Union[str, Any]: return token in self.encoder def a__ ( self , _a ) -> str: return "".join(_a ) def a__ ( self , _a ) -> str: return self.encoder.get(_a , self.encoder.get(self.unk_token ) ) def a__ ( self , _a ) -> Tuple: return self.decoder.get(_a , 
self.unk_token ) def a__ ( self , _a , _a = None ) -> Tuple[str]: if os.path.isdir(_a ): _A : List[Any] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: _A : Optional[int] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory _A : Dict = 0 if " " in self.encoder: _A : str = self.encoder[""" """] del self.encoder[" "] if "\n" in self.encoder: _A : Optional[int] = self.encoder["""\n"""] del self.encoder["\n"] _A : int = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _a : x[1] ) ) with open(_a , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""" ) _A : str = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def a__ ( self , _a , _a = None ) -> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def a__ ( self , _a , _a = None , _a = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is not None: return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) return [1] + ([0] * len(_a ))
def is_power_of_two(number: int) -> bool:
    # Bit trick: a power of two has a single set bit, so n & (n - 1) == 0.
    # Note that 0 also satisfies the test and is reported as a power of two here.
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
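# Why the bit trick works: a power of two has exactly one set bit, and subtracting 1
# flips that bit while setting every lower bit, so the AND clears everything:
#   8 = 0b1000, 7 = 0b0111, 8 & 7 = 0b0000  -> power of two
#   6 = 0b0110, 5 = 0b0101, 6 & 5 = 0b0100  -> not a power of two
assert is_power_of_two(8) is True
assert is_power_of_two(6) is False
assert is_power_of_two(1) is True  # 2**0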
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging _snake_case = logging.get_logger(__name__) class lowercase ( UpperCamelCase__ ): _a = ["audio_values", "audio_mask"] def __init__( self , _a=2048 , _a=1 , _a=[16, 16] , _a=128 , _a=4_4100 , _a=86 , _a=2048 , _a=0.0 , **_a , ) -> List[Any]: super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) _A : Any = spectrogram_length _A : Dict = num_channels _A : Optional[Any] = patch_size _A : str = feature_size // self.patch_size[1] _A : List[Any] = n_fft _A : Optional[Any] = sampling_rate // hop_length_to_sampling_rate _A : List[str] = sampling_rate _A : Union[str, Any] = padding_value _A : List[str] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def a__ ( self , _a ) -> np.ndarray: _A : Optional[int] = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) _A : Tuple = log_spec[:, :-1] _A : int = log_spec - 20.0 _A : Tuple = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) _A : int = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A : Optional[Any] = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): _A : str = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _A : Dict = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _A : Dict = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _A : Union[str, Any] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): _A : Optional[int] = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _A : Any = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _A : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _A : List[str] = np.array(_a ).astype(np.floataa ) # convert into correct format for padding _A : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _A : int = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _A : Optional[int] = padded_audio_features * self.padding_value for i in range(len(_a ) ): _A : Union[str, Any] = audio_features[i] _A : Optional[Any] = feature # return as BatchFeature if return_attention_mask: _A : Optional[Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: _A : Any = {"""audio_values""": padded_audio_features} _A : int = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
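# Invocation sketch for the converter above (the script filename and paths are
# placeholders; the flag names and defaults come straight from the argparse setup):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json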
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_ ): def run_func(snake_case_ ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_,**snake_case_ ): return func(*snake_case_,**snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_,**snake_case_ ): return func(*snake_case_,**snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : List[Any] = random.Random() _A : Optional[int] = [rng.randint(0,vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_,shape=(batch_size, sequence_length),dtype=tf.intaa ) class lowercase ( UpperCamelCase__ ): _a = 42 _a = 42 _a = "TensorFlow" @property def a__ ( self ) -> Any: return tf.__version__ def a__ ( self , _a , _a , _a ) -> float: # initialize GPU on separate process _A : List[Any] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _A : Optional[Any] = self._prepare_inference_func(_a , _a , _a ) return self._measure_speed(_inference ) def a__ ( self , _a , _a , _a ) -> float: _A : Optional[int] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _A : Dict = self._prepare_train_func(_a , _a , _a ) return self._measure_speed(_train ) def a__ ( self , _a , _a , _a ) -> [Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _a ) _A : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _A : Dict = self._prepare_inference_func(_a , _a , _a ) return self._measure_memory(_inference ) def a__ ( self , _a , _a , _a ) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _a ) _A : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _A : str = self._prepare_train_func(_a , _a , _a ) return self._measure_memory(_train ) def a__ ( self , _a , _a , _a ) -> Callable[[], None]: _A : Optional[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _A : List[Any] = ( hasattr(_a , """architectures""" ) and isinstance(config.architectures , _a ) and len(config.architectures ) > 0 ) if not 
self.args.only_pretrain_model and has_model_class_in_config: try: _A : Dict = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _A : Tuple = __import__("""transformers""" , fromlist=[model_class] ) _A : Tuple = getattr(_a , _a ) _A : Optional[int] = model_cls(_a ) except ImportError: raise ImportError( F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _A : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](_a ) # encoder-decoder has vocab size saved differently _A : Any = config.vocab_size if hasattr(_a , """vocab_size""" ) else config.encoder.vocab_size _A : Dict = random_input_ids(_a , _a , _a ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(_a , decoder_input_ids=_a , training=_a ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(_a , training=_a ) _A : str = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def a__ ( self , _a , _a , _a ) -> Callable[[], None]: _A : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _A : str = ( hasattr(_a , """architectures""" ) and isinstance(config.architectures , _a ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _A : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _A : Union[str, Any] = __import__("""transformers""" , fromlist=[model_class] ) _A : Any = getattr(_a , _a ) _A : List[Any] = model_cls(_a ) except ImportError: raise ImportError( F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _A : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_a ) # encoder-decoder has vocab size saved differently _A : str = config.vocab_size if hasattr(_a , """vocab_size""" ) else config.encoder.vocab_size _A : Optional[int] = random_input_ids(_a , _a , _a ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _A : str = model(_a , decoder_input_ids=_a , labels=_a , training=_a )[0] _A : List[str] = tf.gradients(_a , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _A : str = model(_a , labels=_a , training=_a )[0] _A : Tuple = tf.gradients(_a , model.trainable_variables ) return gradients _A : List[str] = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def a__ ( self , _a ) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. 
Running model 5 times to stabilize compilation""" ) timeit.repeat(_a , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _A : Union[str, Any] = timeit.repeat( _a , repeat=self.args.repeat , number=10 , ) return min(_a ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F'''Doesn\'t fit on GPU. {e}''' ) def a__ ( self , _a ) -> [Memory, MemorySummary]: logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _A : int = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _A : List[str] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _A : str = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _A : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(_a ) _A : int = meminfo.used _A : Tuple = Memory(_a ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _A : Optional[Any] = None else: _A : Tuple = measure_peak_memory_cpu(_a ) _A : int = Memory(_a ) if isinstance(_a , _a ) else memory_bytes if self.args.trace_memory_line_by_line: _A : Optional[int] = stop_memory_tracing(_a ) if memory is None: _A : Tuple = summary.total else: _A : Optional[Any] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "transfo-xl" _a = ["mems"] _a = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _a=26_7735 , _a=[2_0000, 4_0000, 20_0000] , _a=1024 , _a=1024 , _a=16 , _a=64 , _a=4096 , _a=4 , _a=False , _a=18 , _a=1600 , _a=1000 , _a=True , _a=True , _a=0 , _a=-1 , _a=True , _a=0.1 , _a=0.0 , _a=True , _a="normal" , _a=0.01 , _a=0.01 , _a=0.02 , _a=1e-5 , _a=0 , **_a , ) -> Tuple: _A : Union[str, Any] = vocab_size _A : Union[str, Any] = [] self.cutoffs.extend(_a ) if proj_share_all_but_first: _A : Union[str, Any] = [False] + [True] * len(self.cutoffs ) else: _A : Dict = [False] + [False] * len(self.cutoffs ) _A : List[str] = d_model _A : Dict = d_embed _A : Dict = d_head _A : Optional[int] = d_inner _A : Tuple = div_val _A : Union[str, Any] = pre_lnorm _A : Union[str, Any] = n_layer _A : str = n_head _A : Optional[Any] = mem_len _A : Tuple = same_length _A : Optional[int] = attn_type _A : Any = clamp_len _A : Dict = sample_softmax _A : Any = adaptive _A : List[str] = dropout _A : List[str] = dropatt _A : Union[str, Any] = untie_r _A : Optional[int] = init _A : Union[str, Any] = init_range _A : Tuple = proj_init_std _A : int = init_std _A : List[str] = layer_norm_epsilon super().__init__(eos_token_id=_a , **_a ) @property def a__ ( self ) -> int: # Message copied from Transformer-XL documentation logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def a__ ( self , _a ) -> Tuple: # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class lowercase ( UpperCamelCase__ ): _a = "fnet" def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Any = vocab_size _A : str = max_position_embeddings _A : Optional[Any] = hidden_size _A : List[str] = num_hidden_layers _A : List[str] = intermediate_size _A : List[Any] = hidden_act _A : List[str] = hidden_dropout_prob _A : List[str] = initializer_range _A : List[Any] = type_vocab_size _A : List[Any] = layer_norm_eps _A : List[str] = use_tpu_fourier_optimizations _A : str = tpu_short_seq_length
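# A small configuration round-trip, assuming this class is exported from
# `transformers` as FNetConfig: keyword overrides land on the attributes set in
# __init__ above, and to_json_string() comes from the PretrainedConfig base class.
from transformers import FNetConfig

small_fnet_config = FNetConfig(num_hidden_layers=4, hidden_size=256, intermediate_size=1024)
print(small_fnet_config.num_hidden_layers)  # 4
print(small_fnet_config.to_json_string()[:80])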
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowercase ( UpperCamelCase__,UpperCamelCase__ ): @register_to_config def __init__( self , *, _a = 4 , _a = 768 , _a , _a , ) -> str: super().__init__() _A : List[Any] = nn.Parameter(torch.zeros(_a ) ) # parameters for additional clip time embeddings _A : Union[str, Any] = nn.Linear(_a , _a ) _A : List[str] = nn.Linear(_a , _a ) # parameters for encoder hidden states _A : List[str] = clip_extra_context_tokens _A : Optional[int] = nn.Linear( _a , self.clip_extra_context_tokens * cross_attention_dim ) _A : Union[str, Any] = nn.Linear(_a , _a ) _A : Tuple = nn.LayerNorm(_a ) def a__ ( self , *, _a , _a , _a , _a ) -> Union[str, Any]: if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _A : Union[str, Any] = image_embeddings.shape[0] _A : Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _A : str = classifier_free_guidance_embeddings.expand( _a , -1 ) _A : List[str] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _A : Any = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _A : Any = self.embedding_proj(_a ) _A : Dict = self.clip_image_embeddings_project_to_time_embeddings(_a ) _A : List[str] = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _A : Optional[int] = self.clip_extra_context_tokens_proj(_a ) _A : Union[str, Any] = clip_extra_context_tokens.reshape(_a , -1 , self.clip_extra_context_tokens ) _A : str = clip_extra_context_tokens.permute(0 , 2 , 1 ) _A : Dict = self.encoder_hidden_states_proj(_a ) _A : Any = self.text_encoder_hidden_states_norm(_a ) _A : Union[str, Any] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
26
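The reshape/permute at the end of the module above turns one projected vector per image into a short sequence of extra cross-attention tokens. A shape-only sketch of that step (dimensions are illustrative):

import torch

batch, cross_attention_dim, n_extra = 2, 1280, 4
projected = torch.randn(batch, n_extra * cross_attention_dim)

tokens = projected.reshape(batch, -1, n_extra)  # (batch, cross_attention_dim, n_extra)
tokens = tokens.permute(0, 2, 1)                # (batch, n_extra, cross_attention_dim)
print(tokens.shape)  # torch.Size([2, 4, 1280])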
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
26
1
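A numeric companion to the symbolic series above: the nth partial sum grows like ln(n) plus the Euler-Mascheroni constant (about 0.5772). A small self-contained check:

from math import log


def harmonic_sum(n: int) -> float:
    # Numeric value of 1 + 1/2 + ... + 1/n
    return sum(1 / k for k in range(1, n + 1))


n = 100_000
print(harmonic_sum(n))        # ~12.0901
print(log(n) + 0.5772156649)  # ~12.0901  (ln n + Euler-Mascheroni constant)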
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num == 0:  # "Enter 0 to exit" -- avoid calling gamma(0), which raises
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
26
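A quick sanity check against the closed forms Gamma(n) = (n-1)! and the half-integer reduction, assuming the gamma() defined above is in scope:

import math

# Integers reproduce factorials ...
assert gamma(5) == 24.0  # Gamma(5) = 4!
# ... and half-integers reduce to multiples of sqrt(pi):
assert abs(gamma(3.5) - 2.5 * 1.5 * 0.5 * math.sqrt(math.pi)) < 1e-12
# The stdlib implementation agrees (and also accepts arbitrary reals):
assert abs(gamma(3.5) - math.gamma(3.5)) < 1e-12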
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
26
1
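Typical entry point for the mapping defined above; the checkpoint name is just an example (downloads its config on first use):

from transformers import AutoFeatureExtractor

# Resolves the class via the checkpoint config's model_type
# ("wav2vec2" -> Wav2Vec2FeatureExtractor in the mapping above).
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(feature_extractor).__name__)  # Wav2Vec2FeatureExtractor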
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
26
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
26
1
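The backward-compat test above encodes the rule that a bare tuple passed as `size` is read in legacy (width, height) order. A tiny sketch of that normalization with a hypothetical helper (`normalize_size` is not the library's API, just an illustration of the rule):

def normalize_size(size) -> dict:
    # Dicts pass through; legacy tuples/lists are read as (width, height);
    # a bare int means a square size.
    if isinstance(size, dict):
        return size
    if isinstance(size, (tuple, list)):
        width, height = size
        return {"height": height, "width": width}
    return {"height": size, "width": size}


print(normalize_size((42, 84)))  # {'height': 84, 'width': 42}
print(normalize_size(42))        # {'height': 42, 'width': 42}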
class Graph:
    def __init__(self) -> None:
        self.vertex: dict = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
26
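The same traversal can be done without recursion using an explicit stack; a minimal sketch over the same adjacency-dict layout as the Graph class above:

from __future__ import annotations


def dfs_iterative(graph: dict[int, list[int]], start: int) -> list[int]:
    # Explicit-stack DFS; returns vertices in visit order.
    visited: list[int] = []
    stack = [start]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.append(vertex)
        # Push neighbors in reverse so the left-most edge is explored first.
        for neighbor in reversed(graph.get(vertex, [])):
            if neighbor not in visited:
                stack.append(neighbor)
    return visited


print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]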
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
26
1
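Two common companions to relu(), sketched in the same numpy style (not part of the original file): its subgradient, and the leaky variant that keeps a small slope for negative inputs.

import numpy as np


def relu_derivative(vector) -> np.ndarray:
    # Subgradient of ReLU: 1 where the input is positive, 0 elsewhere.
    return (np.asarray(vector) > 0).astype(float)


def leaky_relu(vector, alpha: float = 0.01) -> np.ndarray:
    # Keeps a small slope alpha for negative inputs instead of clamping to zero.
    vector = np.asarray(vector)
    return np.where(vector > 0, vector, alpha * vector)


print(relu_derivative([-1, 0, 5]))  # [0. 0. 1.]
print(leaky_relu([-1, 0, 5]))       # [-0.01  0.    5.  ]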
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class lowercase ( unittest.TestCase ): _a = StableDiffusionLDMaDPipeline _a = TEXT_TO_IMAGE_PARAMS _a = TEXT_TO_IMAGE_BATCH_PARAMS _a = TEXT_TO_IMAGE_IMAGE_PARAMS def a__ ( self ) -> List[str]: torch.manual_seed(0 ) _A : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _A : Any = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , ) torch.manual_seed(0 ) _A : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _A : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A : Optional[int] = CLIPTextModel(_a ) _A : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _A : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self , _a , _a=0 ) -> Tuple: if str(_a ).startswith("""mps""" ): _A : Tuple = torch.manual_seed(_a ) else: _A : List[str] = torch.Generator(device=_a ).manual_seed(_a ) _A : str = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self ) -> List[Any]: _A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : Optional[int] = self.get_dummy_components() _A : Union[str, Any] = StableDiffusionLDMaDPipeline(**_a ) _A : Optional[int] = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _A : Dict = self.get_dummy_inputs(_a ) _A : Optional[Any] = ldmad_pipe(**_a ) _A , _A : str = output.rgb, output.depth _A : Union[str, Any] = rgb[0, -3:, -3:, -1] _A : Tuple = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) _A : Any = np.array( [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] ) _A : Dict = np.array([103.46727, 85.812004, 87.849236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2 def a__ ( self ) -> List[str]: _A : int = self.get_dummy_components() _A : Optional[Any] = StableDiffusionLDMaDPipeline(**_a ) _A : Any = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) 
_A : str = self.get_dummy_inputs(_a ) _A : List[str] = 3 * [inputs["""prompt"""]] # forward _A : Tuple = ldmad_pipe(**_a ) _A , _A : Optional[int] = output.rgb, output.depth _A : Dict = rgb_slice_a[0, -3:, -3:, -1] _A : int = depth_slice_a[0, -3:, -1] _A : Optional[Any] = self.get_dummy_inputs(_a ) _A : Optional[Any] = 3 * [inputs.pop("""prompt""" )] _A : Optional[Any] = ldmad_pipe.tokenizer( _a , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , ) _A : Optional[Any] = text_inputs["""input_ids"""].to(_a ) _A : Dict = ldmad_pipe.text_encoder(_a )[0] _A : Dict = prompt_embeds # forward _A : Dict = ldmad_pipe(**_a ) _A , _A : Optional[Any] = output.rgb, output.depth _A : Any = rgb_slice_a[0, -3:, -3:, -1] _A : Any = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4 def a__ ( self ) -> Tuple: _A : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : List[str] = self.get_dummy_components() _A : Optional[int] = PNDMScheduler(skip_prk_steps=_a ) _A : Optional[int] = StableDiffusionLDMaDPipeline(**_a ) _A : int = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _A : Any = self.get_dummy_inputs(_a ) _A : Any = """french fries""" _A : List[Any] = ldmad_pipe(**_a , negative_prompt=_a ) _A , _A : Dict = output.rgb, output.depth _A : Dict = rgb[0, -3:, -3:, -1] _A : int = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) _A : int = np.array( [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] ) _A : str = np.array([107.84738, 84.62802, 89.962135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2 @slow @require_torch_gpu class lowercase ( unittest.TestCase ): def a__ ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Any: _A : Tuple = torch.Generator(device=_a ).manual_seed(_a ) _A : Union[str, Any] = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) ) _A : Dict = torch.from_numpy(_a ).to(device=_a , dtype=_a ) _A : int = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def a__ ( self ) -> str: _A : List[str] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ) _A : List[str] = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _A : Optional[Any] = self.get_inputs(_a ) _A : Optional[Any] = ldmad_pipe(**_a ) _A , _A : Union[str, Any] = output.rgb, output.depth _A : Optional[Any] = rgb[0, -3:, -3:, -1].flatten() _A : Dict = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) _A : List[str] = np.array( [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] ) _A : Any = np.array( [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3 @nightly @require_torch_gpu class lowercase ( unittest.TestCase ): def a__ ( 
self ) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Any: _A : List[Any] = torch.Generator(device=_a ).manual_seed(_a ) _A : Optional[Any] = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) ) _A : int = torch.from_numpy(_a ).to(device=_a , dtype=_a ) _A : int = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def a__ ( self ) -> Dict: _A : List[str] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _A : List[Any] = self.get_inputs(_a ) _A : Dict = ldmad_pipe(**_a ) _A , _A : Dict = output.rgb, output.depth _A : Any = 0.495586 _A : str = 0.33795515 _A : List[Any] = 112.48518 _A : Optional[int] = 98.489746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3 def a__ ( self ) -> Optional[int]: _A : Dict = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _A : Any = self.get_inputs(_a ) _A : int = ldmad_pipe(**_a ) _A , _A : str = output.rgb, output.depth _A : Any = 0.4194127 _A : int = 0.35375586 _A : int = 0.5638502 _A : Tuple = 0.34686103 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3
26
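The assertions in the tests above compare a small deterministic corner patch rather than whole images, which keeps regression checks cheap and stable. A standalone numpy sketch of that slicing pattern (data is random, purely illustrative):

import numpy as np

rgb = np.random.RandomState(0).rand(1, 64, 64, 3)  # (batch, H, W, channels)
image_slice = rgb[0, -3:, -3:, -1]  # 3x3 patch of the last channel
assert image_slice.shape == (3, 3)

expected = image_slice.copy()  # in a real test this is a hard-coded array
assert np.abs(image_slice.flatten() - expected.flatten()).max() < 1e-2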
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
26
1
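The per-rank JSON files written above are merged back into one ordered prediction list by record id. A minimal standalone sketch of that gather-and-sort step, mirroring the intent of combine_partial_results (the data here is made up):

# Each rank contributes records tagged with their original dataset index,
# so sorting by "id" restores dataset order regardless of shard boundaries.
rank0 = [{"pred": "a summary", "id": 2}, {"pred": "another", "id": 0}]
rank1 = [{"pred": "third", "id": 1}]

records = [r for partial in (rank0, rank1) for r in partial]
records.sort(key=lambda x: x["id"])
preds = [x["pred"] for x in records]
print(preds)  # ['another', 'third', 'a summary']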
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
26
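Quick usage of the linked stack, assuming the push/pop/peek names restored above; note the string form lists elements top-first:

stack = LinkedStack[int]()
for value in (1, 2, 3):
    stack.push(value)

print(stack)         # 3->2->1
print(stack.peek())  # 3
print(stack.pop())   # 3
print(len(stack))    # 2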
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
26
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _snake_case = { "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
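The `_LazyModule` wiring above defers heavy imports until an attribute is first accessed. A simplified generic sketch of the same idea using PEP 562 module-level `__getattr__` (this belongs in a package's `__init__.py`; it is not the transformers implementation):

import importlib

# Maps public attribute names to the submodule that actually defines them.
_import_structure = {"RoCBertConfig": ".configuration_roc_bert"}


def __getattr__(name: str):
    # Called only when `name` is not found normally, so submodules are
    # imported on first use instead of at package import time.
    if name in _import_structure:
        module = importlib.import_module(_import_structure[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")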
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
26
1
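The classifier-free guidance step used inside the denoising loop above, isolated as a minimal sketch (shapes are illustrative; the batch stacks the unconditional and image-conditioned halves exactly as the `[negative_image_embeds, image_embeds]` concatenation does):

import torch

guidance_scale = 4.0
noise_pred = torch.randn(4, 1024, 64)  # batched: uncond half + conditioned half

noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(guided.shape)  # torch.Size([2, 1024, 64])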
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
26
1
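The MobileViTV2 conversion script above (like the similar checkpoint converters later in this dump) renames weights with the same dict surgery throughout: pop the tensor under the old key and reinsert it under the new one. A minimal sketch of that pattern, with illustrative keys rather than real checkpoint entries:

import torch

def rename_key(state_dict: dict, old: str, new: str) -> None:
    # pop under the old name, reinsert under the new one
    state_dict[new] = state_dict.pop(old)

sd = {"conv_1.weight": torch.zeros(2, 2)}
rename_key(sd, "conv_1.weight", "mobilevitv2.conv_stem.weight")
assert "conv_1.weight" not in sd and "mobilevitv2.conv_stem.weight" in sd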
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num via the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
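A quick brute-force cross-check of the sieve, written for this note rather than taken from the original file:

def is_prime_naive(n: int) -> bool:
    return n >= 2 and all(n % d for d in range(2, int(n**0.5) + 1))

assert prime_sieve(30) == [n for n in range(2, 31) if is_prime_naive(n)]
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]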
26
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
26
1
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in meters between two points, using reduced latitudes to adjust
    for the Earth being an oblate spheroid rather than a perfect sphere."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
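Two cheap invariants of any distance function make a handy smoke test for the implementation above; the coordinates here are arbitrary sample values, not taken from the original file:

d_zero = haversine_distance(37.77, -122.41, 37.77, -122.41)
d_ab = haversine_distance(37.77, -122.41, 40.71, -74.00)
d_ba = haversine_distance(40.71, -74.00, 37.77, -122.41)
assert d_zero == 0.0
assert abs(d_ab - d_ba) < 1e-6  # symmetry, up to floating-point noise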
26
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class lowercase ( UpperCamelCase__,UpperCamelCase__ ): _a = 1 @register_to_config def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]: _A : Dict = None _A : List[Any] = None _A : Dict = None def a__ ( self , _a , _a = None ) -> Union[str, Any]: _A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a ) def a__ ( self , _a , _a , _a , _a=None ) -> Dict: if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score _A : Any = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) _A : List[str] = std.flatten() while len(std.shape ) < len(score.shape ): _A : List[Any] = std.unsqueeze(-1 ) _A : int = -score / std # compute _A : Tuple = -1.0 / len(self.timesteps ) _A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _A : List[str] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): _A : Union[str, Any] = beta_t.unsqueeze(-1 ) _A : Tuple = -0.5 * beta_t * x _A : Tuple = torch.sqrt(_a ) _A : Dict = drift - diffusion**2 * score _A : Dict = x + drift * dt # add noise _A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype ) _A : str = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ) -> Optional[Any]: return self.config.num_train_timesteps
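The step function above applies one Euler-Maruyama update to the reverse-time diffusion SDE. A generic sketch of that integration scheme on a toy Ornstein-Uhlenbeck process (all constants here are illustrative, and this is not the scheduler's exact math):

import numpy as np

rng = np.random.default_rng(0)

def euler_maruyama(x, t, dt, f, g):
    # one step of dx = f(x, t) dt + g(t) dW
    noise = rng.standard_normal(size=np.shape(x))
    x_mean = x + f(x, t) * dt                      # deterministic drift part
    return x_mean + g(t) * np.sqrt(abs(dt)) * noise, x_mean

x = 1.0
for step in range(100):
    x, x_mean = euler_maruyama(
        x, t=step * 0.01, dt=0.01,
        f=lambda x, t: -x,   # mean-reverting drift
        g=lambda t: 0.1,     # constant diffusion
    )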
26
1
def bead_sort(sequence: list) -> list:
    """Bead ("gravity") sort; only valid for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
26
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
26
1
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a=None , _a=None , _a=0 ) -> List[Any]: _A : int = 1.0 if scale is None else scale _A : Dict = 0.0 if loc is None else loc super().__init__(_a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_a )] ) @property def a__ ( self ) -> Tuple: return self.base_dist.mean * self.scale + self.loc @property def a__ ( self ) -> int: return self.base_dist.variance * self.scale**2 @property def a__ ( self ) -> Tuple: return self.variance.sqrt() class lowercase ( nn.Module ): def __init__( self , _a , _a , _a , **_a ) -> None: super().__init__(**_a ) _A : List[Any] = args_dim _A : Optional[Any] = nn.ModuleList([nn.Linear(_a , _a ) for dim in args_dim.values()] ) _A : Dict = domain_map def a__ ( self , _a ) -> Tuple[torch.Tensor]: _A : Optional[Any] = [proj(_a ) for proj in self.proj] return self.domain_map(*_a ) class lowercase ( nn.Module ): def __init__( self , _a ) -> List[Any]: super().__init__() _A : Any = function def a__ ( self , _a , *_a ) -> Optional[int]: return self.function(_a , *_a ) class lowercase : _a = 42 _a = 42 _a = 42 def __init__( self , _a = 1 ) -> None: _A : Tuple = dim _A : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def a__ ( self , _a ) -> Optional[int]: if self.dim == 1: return self.distribution_class(*_a ) else: return Independent(self.distribution_class(*_a ) , 1 ) def a__ ( self , _a , _a = None , _a = None , ) -> Distribution: _A : Union[str, Any] = self._base_distribution(_a ) if loc is None and scale is None: return distr else: return AffineTransformed(_a , loc=_a , scale=_a , event_dim=self.event_dim ) @property def a__ ( self ) -> Tuple: return () if self.dim == 1 else (self.dim,) @property def a__ ( self ) -> int: return len(self.event_shape ) @property def a__ ( self ) -> float: return 0.0 def a__ ( self , _a ) -> nn.Module: return ParameterProjection( in_features=_a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def a__ ( self , *_a ) -> Dict: raise NotImplementedError() @staticmethod def a__ ( _a ) -> torch.Tensor: return (x + torch.sqrt(torch.square(_a ) + 4.0 )) / 2.0 class lowercase ( UpperCamelCase__ ): _a = {"df": 1, "loc": 1, "scale": 1} _a = StudentT @classmethod def a__ ( cls , _a , _a , _a ) -> int: _A : int = cls.squareplus(_a ).clamp_min(torch.finfo(scale.dtype ).eps ) _A : Any = 2.0 + cls.squareplus(_a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class lowercase ( UpperCamelCase__ ): _a = {"loc": 1, "scale": 1} _a = Normal @classmethod def a__ ( cls , _a , _a ) -> List[Any]: _A : List[str] = cls.squareplus(_a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class lowercase ( UpperCamelCase__ ): _a = {"total_count": 1, "logits": 1} _a = NegativeBinomial @classmethod def a__ ( cls , _a , _a ) -> Union[str, Any]: _A : List[str] = cls.squareplus(_a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def a__ ( self , _a ) -> Distribution: _A , _A : Tuple = distr_args if self.dim == 1: return self.distribution_class(total_count=_a , logits=_a ) else: return Independent(self.distribution_class(total_count=_a , logits=_a ) , 1 ) def a__ ( self , _a , _a = None , _a = None ) -> Distribution: _A , _A : List[Any] = distr_args if scale is not None: # See scaling 
# property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
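The AffineTransformed wrapper above delegates to torch's distribution machinery: wrapping a base distribution shifts its mean by loc and scales its standard deviation by scale. A minimal sketch with the same torch primitives (the sample count and constants are illustrative):

import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

base = Normal(torch.tensor(0.0), torch.tensor(1.0))
scaled = TransformedDistribution(base, [AffineTransform(loc=3.0, scale=2.0)])

sample = scaled.sample((10000,))
print(sample.mean())  # close to 3.0
print(sample.std())   # close to 2.0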
26
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in meters between two points, using reduced latitudes to adjust
    for the Earth being an oblate spheroid rather than a perfect sphere."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
1
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : List[Any] = checkpoint _A : Optional[Any] = {} _A : str = vae_state_dict["""encoder.conv_in.weight"""] _A : int = vae_state_dict["""encoder.conv_in.bias"""] _A : Dict = vae_state_dict["""encoder.conv_out.weight"""] _A : Tuple = vae_state_dict["""encoder.conv_out.bias"""] _A : List[Any] = vae_state_dict["""encoder.norm_out.weight"""] _A : Tuple = vae_state_dict["""encoder.norm_out.bias"""] _A : Optional[int] = vae_state_dict["""decoder.conv_in.weight"""] _A : Tuple = vae_state_dict["""decoder.conv_in.bias"""] _A : Union[str, Any] = vae_state_dict["""decoder.conv_out.weight"""] _A : Tuple = vae_state_dict["""decoder.conv_out.bias"""] _A : Tuple = vae_state_dict["""decoder.norm_out.weight"""] _A : List[str] = vae_state_dict["""decoder.norm_out.bias"""] _A : Optional[int] = vae_state_dict["""quant_conv.weight"""] _A : List[Any] = vae_state_dict["""quant_conv.bias"""] _A : Optional[Any] = vae_state_dict["""post_quant_conv.weight"""] _A : int = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only _A : Any = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) _A : Optional[Any] = { layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the decoder up blocks only _A : Any = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) _A : int = { layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(snake_case_ ) } for i in range(snake_case_ ): _A : str = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key] if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: _A : Any = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.weight''' ) _A : List[str] = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.bias''' ) _A : Optional[Any] = renew_vae_resnet_paths(snake_case_ ) _A : str = {"""old""": f'''down.{i}.block''', """new""": f'''down_blocks.{i}.resnets'''} assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ ) _A : List[str] = [key for key in vae_state_dict if """encoder.mid.block""" in key] _A : Union[str, Any] = 2 for i in range(1,num_mid_res_blocks + 1 ): _A : str = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key] _A : str = renew_vae_resnet_paths(snake_case_ ) _A : str = {"""old""": f'''mid.block_{i}''', """new""": f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ ) _A : str = [key for key in vae_state_dict if """encoder.mid.attn""" in key] _A : int = renew_vae_attention_paths(snake_case_ ) _A : List[str] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ ) conv_attn_to_linear(snake_case_ ) for i in range(snake_case_ ): _A : List[str] = num_up_blocks - 1 - i _A : List[str] = [ key for key in 
up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key ] if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: _A : Union[str, Any] = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.weight''' ] _A : List[str] = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.bias''' ] _A : str = renew_vae_resnet_paths(snake_case_ ) _A : int = {"""old""": f'''up.{block_id}.block''', """new""": f'''up_blocks.{i}.resnets'''} assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ ) _A : Tuple = [key for key in vae_state_dict if """decoder.mid.block""" in key] _A : Tuple = 2 for i in range(1,num_mid_res_blocks + 1 ): _A : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key] _A : Any = renew_vae_resnet_paths(snake_case_ ) _A : List[str] = {"""old""": f'''mid.block_{i}''', """new""": f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ ) _A : Union[str, Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key] _A : Any = renew_vae_attention_paths(snake_case_ ) _A : Optional[int] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ ) conv_attn_to_linear(snake_case_ ) return new_checkpoint def lowerCAmelCase_ ( snake_case_,snake_case_,): # Only support V1 _A : Tuple = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) _A : Tuple = io.BytesIO(r.content ) _A : List[Any] = OmegaConf.load(snake_case_ ) _A : Optional[int] = 512 _A : Dict = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open _A : Tuple = {} with safe_open(snake_case_,framework="""pt""",device="""cpu""" ) as f: for key in f.keys(): _A : int = f.get_tensor(snake_case_ ) else: _A : int = torch.load(snake_case_,map_location=snake_case_ )["""state_dict"""] # Convert the VAE model. _A : List[str] = create_vae_diffusers_config(snake_case_,image_size=snake_case_ ) _A : Any = custom_convert_ldm_vae_checkpoint(snake_case_,snake_case_ ) _A : List[Any] = AutoencoderKL(**snake_case_ ) vae.load_state_dict(snake_case_ ) vae.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") _snake_case = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
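The VAE converter above branches on the checkpoint extension before loading. The same idea in isolation, where safetensors.torch.load_file is an assumed-equivalent shortcut for the safe_open loop the script uses:

import torch

def load_state_dict(path: str) -> dict:
    if path.endswith("safetensors"):
        from safetensors.torch import load_file  # lazy import, optional dependency
        return load_file(path, device="cpu")
    return torch.load(path, map_location="cpu")["state_dict"]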
26
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
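The "highway" exits in this DeeBERT-style model decide whether to stop early by measuring the entropy of an intermediate classifier's softmax: low entropy means the layer is confident. A standalone sketch of that criterion (the threshold is an illustrative value, not one from the original code):

import torch

def entropy_of_logits(logits: torch.Tensor) -> torch.Tensor:
    p = torch.softmax(logits, dim=-1)
    return -(p * torch.log(p + 1e-12)).sum(dim=-1)

logits = torch.tensor([[8.0, 0.1, 0.2]])  # a confident intermediate layer
threshold = 0.3
if entropy_of_logits(logits).item() < threshold:
    print("exit early with this layer's prediction")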
26
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _snake_case = { "configuration_blip": [ "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlipConfig", "BlipTextConfig", "BlipVisionConfig", ], "processing_blip": ["BlipProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["BlipImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "BlipModel", "BlipPreTrainedModel", "BlipForConditionalGeneration", "BlipForQuestionAnswering", "BlipVisionModel", "BlipTextModel", "BlipForImageTextRetrieval", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBlipModel", "TFBlipPreTrainedModel", "TFBlipForConditionalGeneration", "TFBlipForQuestionAnswering", "TFBlipVisionModel", "TFBlipTextModel", "TFBlipForImageTextRetrieval", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
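The _LazyModule machinery above defers heavy imports until first attribute access. A stripped-down sketch of the same idea using PEP 562's module-level __getattr__; it assumes it lives in a package __init__ so the relative submodule path resolves, and it omits the bookkeeping (__dir__, submodule registration) the real class does:

import importlib

_lazy = {"BlipProcessor": ".processing_blip"}  # attribute name -> submodule

def __getattr__(name):
    if name in _lazy:
        module = importlib.import_module(_lazy[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")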
26
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
26
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: _A : str = tempfile.mkdtemp() _A : str = BlipImageProcessor() _A : List[Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) _A : Union[str, Any] = BlipaProcessor(_a , _a ) processor.save_pretrained(self.tmpdirname ) def a__ ( self , **_a ) -> List[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer def a__ ( self , **_a ) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor def a__ ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def a__ ( self ) -> Any: _A : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _A : List[str] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self ) -> List[Any]: _A : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _A : Optional[int] = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) _A : List[Any] = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def a__ ( self ) -> str: _A : Any = self.get_image_processor() _A : Optional[Any] = self.get_tokenizer() _A : List[Any] = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : List[str] = self.prepare_image_inputs() _A : Optional[Any] = image_processor(_a , return_tensors="""np""" ) _A : Optional[int] = processor(images=_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a__ ( self ) -> List[Any]: _A : Tuple = self.get_image_processor() _A : List[str] = self.get_tokenizer() _A : str = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : Optional[Any] = """lower newer""" _A : Tuple = processor(text=_a ) _A : Optional[int] = tokenizer(_a , return_token_type_ids=_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a__ ( self ) -> Optional[Any]: _A : str = self.get_image_processor() _A : List[str] = self.get_tokenizer() _A : Dict = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : int = """lower newer""" _A : List[str] = self.prepare_image_inputs() _A : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_a ): processor() def a__ ( self ) -> Optional[Any]: _A : List[Any] = self.get_image_processor() _A : Optional[int] = self.get_tokenizer() _A : str = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : Union[str, Any] = [[1, 
4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A : Tuple = processor.batch_decode(_a ) _A : Optional[Any] = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def a__ ( self ) -> Any: _A : List[str] = self.get_image_processor() _A : Any = self.get_tokenizer() _A : Optional[Any] = BlipaProcessor(tokenizer=_a , image_processor=_a ) _A : Tuple = """lower newer""" _A : str = self.prepare_image_inputs() _A : Any = processor(text=_a , images=_a ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
26
def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) if n == 0: return 0 _A : Tuple = float("""-inf""" ) for i in range(1,n + 1 ): _A : str = max( snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) ) return max_revue def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) _A : Dict = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _A : List[str] = float("""-inf""" ) for i in range(1,n + 1 ): _A : Optional[Any] = max( snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),) _A : Tuple = max_revenue return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _A : Any = 0 for i in range(1,n + 1 ): _A : Optional[Any] = max_rev[i] for j in range(1,i + 1 ): _A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] ) _A : int = max_revenue_i return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): if n < 0: _A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(snake_case_ ) if n > len(snake_case_ ): _A : Any = ( """Each integral piece of rod must have a corresponding price. """ f'''Got n = {n} but length of prices = {len(snake_case_ )}''' ) raise ValueError(snake_case_ ) def lowerCAmelCase_ ( ): _A : Tuple = [6, 10, 12, 15, 20, 23] _A : List[Any] = len(snake_case_ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _A : Any = 36 _A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ ) _A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ ) _A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
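The three rod-cutting implementations above trade recomputation against explicit memo tables; the same top-down memoization can also be had from the standard library in a few lines. A compact alternative sketch, not part of the original file:

from functools import lru_cache

def cut_rod(prices: list) -> int:
    @lru_cache(maxsize=None)
    def best(n: int) -> int:
        if n == 0:
            return 0
        return max(prices[i - 1] + best(n - i) for i in range(1, n + 1))
    return best(len(prices))

assert cut_rod([6, 10, 12, 15, 20, 23]) == 36  # six pieces of length 1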
26
1
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
26
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance.

    Note: this depends on Yahoo's HTML layout and CSS class names, so it is
    inherently fragile and may break whenever the page changes.
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
26
1
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """A matrix is Hermitian iff it equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Rayleigh quotient (v* a v) / (v* v) of Hermitian matrix a at vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
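For a Hermitian matrix the Rayleigh quotient is always pinned between the smallest and largest eigenvalues, which gives a quick numerical check on the function above:

a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]], dtype=float)
v = np.array([[1.0], [2.0], [3.0]])
lo, hi = np.linalg.eigvalsh(a)[[0, -1]]  # eigvalsh returns ascending order
r = rayleigh_quotient(a, v).item()
assert lo - 1e-9 <= r <= hi + 1e-9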
26
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
26
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
26
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
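The same recursion extends to return Bezout coefficients x, y with a*x + b*y == gcd(a, b), a common companion to the plain GCD above; this sketch is an addition, not part of the original file:

def extended_gcd(a: int, b: int) -> tuple:
    if b == 0:
        return a, 1, 0
    g, x, y = extended_gcd(b, a % b)
    return g, y, x - (a // b) * y

g, x, y = extended_gcd(240, 46)
assert g == 2 and 240 * x + 46 * y == g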
26
1
_snake_case = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
26
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (1, 2, 4, 8, ...).

    Note: as in the original implementation, this also returns True for 0.
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
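Why n & (n - 1) works: subtracting 1 flips the lowest set bit and everything below it, so the AND clears exactly that bit; a power of two has a single set bit, so only then does the result become 0. A small trace:

for n in [1, 2, 3, 4, 6, 8, 12, 16]:
    print(f"{n:5b} & {n - 1:5b} -> {n & (n - 1):5b}  power of two: {n & (n - 1) == 0}")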
26
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): _A : Union[str, Any] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: _A : Union[str, Any] = 192 _A : str = 768 _A : List[Any] = 12 _A : str = 3 _A : int = [800, 1333] _A : Union[str, Any] = False elif yolos_name == "yolos_s_dWr": _A : List[str] = 330 _A : Optional[Any] = 14 _A : List[Any] = 6 _A : Optional[int] = 1320 elif "yolos_s" in yolos_name: _A : Tuple = 384 _A : Any = 1536 _A : Any = 12 _A : List[Any] = 6 elif "yolos_b" in yolos_name: _A : int = [800, 1344] _A : Tuple = 91 _A : str = """huggingface/label-files""" _A : Tuple = """coco-detection-id2label.json""" _A : Any = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : Dict = {int(snake_case_ ): v for k, v in idalabel.items()} _A : Tuple = idalabel _A : Any = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _A : int = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A : Union[str, Any] = in_proj_weight[: config.hidden_size, :] _A : Union[str, Any] = in_proj_bias[: config.hidden_size] _A : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A : Any = in_proj_weight[-config.hidden_size :, :] _A : Optional[int] = in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( snake_case_ ): if "backbone" in name: _A : str = name.replace("""backbone""","""vit""" ) if "cls_token" in name: _A : Any = name.replace("""cls_token""","""embeddings.cls_token""" ) if "det_token" in name: _A : int = name.replace("""det_token""","""embeddings.detection_tokens""" ) if "mid_pos_embed" in name: _A : int = name.replace("""mid_pos_embed""","""encoder.mid_position_embeddings""" ) if "pos_embed" in name: _A : Optional[int] = name.replace("""pos_embed""","""embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _A : str = name.replace("""patch_embed.proj""","""embeddings.patch_embeddings.projection""" ) if "blocks" in name: _A : Union[str, Any] = name.replace("""blocks""","""encoder.layer""" ) if "attn.proj" in name: _A : Tuple = name.replace("""attn.proj""","""attention.output.dense""" ) if "attn" in name: _A : str = name.replace("""attn""","""attention.self""" ) if "norm1" in name: _A : Tuple = name.replace("""norm1""","""layernorm_before""" ) if "norm2" in name: _A : Optional[Any] = name.replace("""norm2""","""layernorm_after""" ) if "mlp.fc1" in name: _A : Optional[int] = name.replace("""mlp.fc1""","""intermediate.dense""" ) if "mlp.fc2" in name: _A : Dict = name.replace("""mlp.fc2""","""output.dense""" ) if "class_embed" in name: _A : Union[str, Any] = name.replace("""class_embed""","""class_labels_classifier""" ) if "bbox_embed" in name: _A : Any = name.replace("""bbox_embed""","""bbox_predictor""" ) if "vit.norm" in name: _A : Union[str, Any] = 
name.replace("""vit.norm""","""vit.layernorm""" ) return name def lowerCAmelCase_ ( snake_case_,snake_case_ ): for key in orig_state_dict.copy().keys(): _A : Tuple = orig_state_dict.pop(snake_case_ ) if "qkv" in key: _A : str = key.split(""".""" ) _A : Tuple = int(key_split[2] ) _A : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: _A : Optional[Any] = val[:dim, :] _A : str = val[ dim : dim * 2, : ] _A : List[str] = val[-dim:, :] else: _A : Optional[int] = val[:dim] _A : Union[str, Any] = val[dim : dim * 2] _A : Tuple = val[-dim:] else: _A : List[Any] = val return orig_state_dict def lowerCAmelCase_ ( ): _A : Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" _A : Optional[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = False ): _A : Optional[int] = get_yolos_config(snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" )["""model"""] # load 🤗 model _A : Tuple = YolosForObjectDetection(snake_case_ ) model.eval() _A : Optional[int] = convert_state_dict(snake_case_,snake_case_ ) model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by YolosImageProcessor _A : List[str] = 800 if yolos_name != """yolos_ti""" else 512 _A : Optional[Any] = YolosImageProcessor(format="""coco_detection""",size=snake_case_ ) _A : Dict = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : List[Any] = model(**snake_case_ ) _A , _A : Optional[int] = outputs.logits, outputs.pred_boxes _A , _A : int = None, None if yolos_name == "yolos_ti": _A : Any = torch.tensor( [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] ) _A : Optional[Any] = torch.tensor( [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] ) elif yolos_name == "yolos_s_200_pre": _A : Union[str, Any] = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] ) _A : str = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] ) elif yolos_name == "yolos_s_300_pre": _A : str = torch.tensor( [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] ) _A : Optional[int] = torch.tensor( [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] ) elif yolos_name == "yolos_s_dWr": _A : str = torch.tensor( [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] ) _A : Any = torch.tensor( [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] ) elif yolos_name == "yolos_base": _A : Any = torch.tensor( [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] ) _A : List[str] = torch.tensor( [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3],snake_case_,atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) 
image_processor.save_pretrained(snake_case_ ) if push_to_hub: _A : Optional[int] = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) _A : int = model_mapping[yolos_name] image_processor.push_to_hub(snake_case_,organization="""hustvl""" ) model.push_to_hub(snake_case_,organization="""hustvl""" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _snake_case = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
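# A minimal driver sketch for the converter above, mirroring the positional call in its
# __main__ block; the entry-point name follows that call, and the checkpoint path and
# output directory are placeholders rather than values from the original source.
convert_yolos_checkpoint(
    "yolos_s_200_pre",               # yolos_name
    "/path/to/yolos_s_200_pre.pth",  # checkpoint_path (placeholder)
    "./yolos-small-converted",       # pytorch_dump_folder_path (placeholder)
    False,                           # push_to_hub
)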
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
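# A minimal driver sketch for the ParlAI-to-Hugging-Face conversion above, mirroring the
# positional call in its __main__ block; the source path is a placeholder, and the other
# two arguments reuse the argparse defaults shown above.
convert_parlai_checkpoint(
    "/path/to/blenderbot-model.bin",  # src_path (placeholder)
    "hf_blenderbot",                  # save_dir (argparse default)
    "blenderbot-3b-config.json",      # hf_config_json (argparse default)
)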
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase : def __init__( self , _a , _a=3 , _a=32 , _a=3 , _a=10 , _a=[10, 20, 30, 40] , _a=[1, 1, 2, 1] , _a=True , _a=True , _a="relu" , _a=3 , _a=None , ) -> Any: _A : Tuple = parent _A : int = batch_size _A : int = image_size _A : List[str] = num_channels _A : Optional[int] = embeddings_size _A : int = hidden_sizes _A : Any = depths _A : Dict = is_training _A : Union[str, Any] = use_labels _A : List[str] = hidden_act _A : Any = num_labels _A : int = scope _A : Any = len(_a ) def a__ ( self ) -> Tuple: _A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A : Optional[Any] = None if self.use_labels: _A : Tuple = ids_tensor([self.batch_size] , self.num_labels ) _A : Union[str, Any] = self.get_config() return config, pixel_values, labels def a__ ( self ) -> List[str]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def a__ ( self , _a , _a , _a ) -> str: _A : List[str] = TFRegNetModel(config=_a ) _A : Optional[Any] = model(_a , training=_a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self , _a , _a , _a ) -> Dict: _A : List[str] = self.num_labels _A : str = TFRegNetForImageClassification(_a ) _A : List[Any] = model(_a , labels=_a , training=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self ) -> Optional[Any]: _A : int = self.prepare_config_and_inputs() _A , _A , _A : List[str] = config_and_inputs _A : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () _a = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) _a = False _a = False _a = False _a = False _a = False def a__ ( self ) -> Optional[Any]: _A : Optional[int] = TFRegNetModelTester(self ) _A : str = ConfigTester(self , config_class=_a , has_text_modality=_a ) def a__ ( self ) -> Union[str, Any]: return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def a__ ( self ) -> Optional[int]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def a__ ( self ) -> List[Any]: super().test_keras_fit() @unittest.skip(reason="""RegNet does not support input and output 
embeddings""" ) def a__ ( self ) -> Any: pass def a__ ( self ) -> str: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Dict = model_class(_a ) _A : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : Optional[Any] = [*signature.parameters.keys()] _A : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _a ) def a__ ( self ) -> Dict: _A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def a__ ( self ) -> Dict: def check_hidden_states_output(_a , _a , _a ): _A : int = model_class(_a ) _A : Optional[Any] = model(**self._prepare_for_class(_a , _a ) , training=_a ) _A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _A : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(_a ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _A , _A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _A : Dict = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: _A : Tuple = layer_type _A : str = True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Any = True check_hidden_states_output(_a , _a , _a ) def a__ ( self ) -> Dict: _A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(_a , _a , _a , _a={} ): _A : Dict = model(_a , return_dict=_a , **_a ) _A : int = model(_a , return_dict=_a , **_a ).to_tuple() def recursive_check(_a , _a ): if isinstance(_a , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_a , _a ): recursive_check(_a , _a ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(_a , _a ) ) , msg=( """Tuple and dict output are not equal. 
Difference:""" F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(_a , _a ) for model_class in self.all_model_classes: _A : Dict = model_class(_a ) _A : str = self._prepare_for_class(_a , _a ) _A : Dict = self._prepare_for_class(_a , _a ) check_equivalence(_a , _a , _a ) _A : Optional[Any] = self._prepare_for_class(_a , _a , return_labels=_a ) _A : List[str] = self._prepare_for_class(_a , _a , return_labels=_a ) check_equivalence(_a , _a , _a ) _A : Optional[Any] = self._prepare_for_class(_a , _a ) _A : List[Any] = self._prepare_for_class(_a , _a ) check_equivalence(_a , _a , _a , {"""output_hidden_states""": True} ) _A : int = self._prepare_for_class(_a , _a , return_labels=_a ) _A : List[Any] = self._prepare_for_class(_a , _a , return_labels=_a ) check_equivalence(_a , _a , _a , {"""output_hidden_states""": True} ) def a__ ( self ) -> List[Any]: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def a__ ( self ) -> Optional[int]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A : List[Any] = TFRegNetModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def lowerCAmelCase_ ( ): _A : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class lowercase ( unittest.TestCase ): @cached_property def a__ ( self ) -> str: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a__ ( self ) -> Dict: _A : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _A : str = self.default_image_processor _A : Dict = prepare_img() _A : int = image_processor(images=_a , return_tensors="""tf""" ) # forward pass _A : Any = model(**_a , training=_a ) # verify the logits _A : str = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _a ) _A : str = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , _a , atol=1e-4 )
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
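# The reader/writer pair above backs the public `datasets` SQL helpers; a short round-trip
# sketch, assuming a local SQLite database (the URI and table name are illustrative).
from datasets import Dataset

ds = Dataset.from_dict({"idx": [0, 1], "text": ["hello", "world"]})
ds.to_sql("demo_table", "sqlite:///demo.db")                    # exercises the writer class
restored = Dataset.from_sql("demo_table", "sqlite:///demo.db")  # exercises the reader class
print(restored[0])  # {'idx': 0, 'text': 'hello'}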
def lowerCAmelCase_():
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{lowerCAmelCase_() = }")
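# Sanity check for the search above: the only Pythagorean triplet with a + b + c = 1000
# is (200, 375, 425), so the returned product should be 31875000.
a, b, c = 200, 375, 425
assert a + b + c == 1000 and a * a + b * b == c * c
print(a * b * c)  # 31875000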
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class lowercase ( UpperCamelCase__ ): _a = "fnet" def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Any = vocab_size _A : str = max_position_embeddings _A : Optional[Any] = hidden_size _A : List[str] = num_hidden_layers _A : List[str] = intermediate_size _A : List[Any] = hidden_act _A : List[str] = hidden_dropout_prob _A : List[str] = initializer_range _A : List[Any] = type_vocab_size _A : List[Any] = layer_norm_eps _A : List[str] = use_tpu_fourier_optimizations _A : str = tpu_short_seq_length
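# A usage sketch for the configuration above, written against the public transformers
# names rather than the obfuscated local ones; the keyword values mirror the defaults.
from transformers import FNetConfig, FNetModel

config = FNetConfig(vocab_size=32000, hidden_size=768, num_hidden_layers=12)
model = FNetModel(config)  # randomly initialised FNet with the settings above
print(config.model_type)   # "fnet"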
from __future__ import annotations

import numpy as np


def lowerCAmelCase_(snake_case_):
    return np.maximum(0, snake_case_)


if __name__ == "__main__":
    print(np.array(lowerCAmelCase_([-1, 0, 5])))  # --> [0, 0, 5]
def lowerCAmelCase_(n_term):
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    _snake_case = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(lowerCAmelCase_(_snake_case))
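# Usage sketch for the series builder above: it returns the terms as strings, which can
# be checked against the numeric partial sum.
terms = lowerCAmelCase_("3")
print(terms)                               # ['1', '1/2', '1/3']
print(sum(1 / (k + 1) for k in range(3)))  # 1.8333..., the numeric value of those terms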
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _snake_case = logging.get_logger(__name__) class lowercase ( UpperCamelCase__ ): _a = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = IMAGENET_DEFAULT_MEAN , _a = IMAGENET_DEFAULT_STD , **_a , ) -> None: super().__init__(**_a ) _A : Any = size if size is not None else {"""shortest_edge""": 224} _A : str = get_size_dict(_a , default_to_square=_a ) _A : List[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _A : int = get_size_dict(_a , param_name="""crop_size""" ) _A : str = do_resize _A : List[str] = size _A : str = resample _A : Dict = do_center_crop _A : Optional[Any] = crop_size _A : Optional[int] = do_rescale _A : Optional[Any] = rescale_factor _A : Tuple = do_normalize _A : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _A : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray: _A : Optional[Any] = get_size_dict(_a , default_to_square=_a ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _A : Optional[Any] = int((256 / 224) * size["""shortest_edge"""] ) _A : Any = get_resize_output_image_size(_a , size=_a , default_to_square=_a ) _A : str = {"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _a , size=(size_dict["""height"""], size_dict["""width"""]) , resample=_a , data_format=_a , **_a ) def a__ ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: _A : Optional[int] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def a__ ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: return rescale(_a , scale=_a , data_format=_a , **_a ) def a__ ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def a__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> BatchFeature: _A : Dict = do_resize if do_resize is not None else self.do_resize _A : List[Any] = resample if resample is not None else self.resample _A : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop _A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _A : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _A : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _A : Optional[int] = image_mean if image_mean is not None else self.image_mean _A : int = image_std if image_std is not None else self.image_std _A : List[Any] = size if size is not None else self.size _A : Any = get_size_dict(_a , default_to_square=_a ) _A : Optional[int] = crop_size if crop_size is not None else self.crop_size _A : Optional[Any] = get_size_dict(_a , param_name="""crop_size""" ) _A : List[str] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. _A : int = [to_numpy_array(_a ) for image in images] if do_resize: _A : Tuple = [self.resize(_a , _a , _a ) for image in images] if do_center_crop: _A : Dict = [self.center_crop(_a , _a ) for image in images] if do_rescale: _A : Dict = [self.rescale(_a , _a ) for image in images] if do_normalize: _A : Optional[int] = [self.normalize(_a , _a , _a ) for image in images] _A : str = [to_channel_dimension_format(_a , _a ) for image in images] _A : int = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a )
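# A usage sketch for the image processor above; `lowercase` is the record's obfuscated
# class name, the dummy image is a placeholder, and the expected output shape is an
# assumption based on the default 224x224 center crop.
import numpy as np

processor = lowercase()
image = np.zeros((256, 256, 3), dtype=np.uint8)  # dummy HWC image
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)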
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
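# The usual entry point for the mapping machinery above, via the public API; per the
# mapping table, the "wav2vec2" model type resolves to Wav2Vec2FeatureExtractor.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor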
def lowerCAmelCase_(list_data, key, left=0, right=0):
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return lowerCAmelCase_(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
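# Usage sketch for the two-ended recursive search above:
data = [5, 2, 9, 4, 7]
print(lowerCAmelCase_(data, 9))  # 2, the index of the key
print(lowerCAmelCase_(data, 8))  # -1, key absent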
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase ( UpperCamelCase__ ): _a = ["image_processor", "tokenizer"] _a = "LayoutLMv3ImageProcessor" _a = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self , _a=None , _a=None , **_a ) -> Tuple: _A : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _a , ) _A : Optional[int] = kwargs.pop("""feature_extractor""" ) _A : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a , _a ) def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor _A : Dict = self.image_processor(images=_a , return_tensors=_a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_a , _a ): _A : Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension) _A : Any = features["""words"""] _A : Optional[Any] = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel values _A : Tuple = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: _A : Union[str, Any] = self.get_overflowing_images(_a , encoded_inputs["""overflow_to_sample_mapping"""] ) _A : Union[str, Any] = images return encoded_inputs def a__ ( self , _a , _a ) -> Optional[int]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _A : List[str] = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_a ) != len(_a ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" F''' {len(_a )} and {len(_a )}''' ) return images_with_overflow def a__ ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.batch_decode(*_a , **_a ) def a__ ( self , *_a , **_a ) -> List[Any]: return self.tokenizer.decode(*_a , **_a ) @property def a__ ( self ) -> 
List[Any]: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def a__ ( self ) -> Union[str, Any]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _a , ) return self.image_processor_class @property def a__ ( self ) -> int: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _a , ) return self.image_processor
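# A usage sketch for the processor above, using the public class names; the blank image
# is a placeholder, and the default apply_ocr=True path additionally requires pytesseract
# to be installed.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.new("RGB", (224, 224), color="white")
encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
print(list(encoding.keys()))  # expected: input_ids, attention_mask, bbox, pixel_values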
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( UpperCamelCase__ ): _a = (UnCLIPScheduler,) def a__ ( self , **_a ) -> Any: _A : Dict = { """num_train_timesteps""": 1000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_a ) return config def a__ ( self ) -> Union[str, Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> str: for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_a ) def a__ ( self ) -> List[str]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_a ) def a__ ( self ) -> Optional[Any]: for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_a ) def a__ ( self ) -> str: for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Tuple: for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_a , prev_timestep=_a ) def a__ ( self ) -> str: _A : str = self.scheduler_classes[0] _A : str = self.get_scheduler_config(variance_type="""fixed_small_log""" ) _A : Optional[Any] = scheduler_class(**_a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5 def a__ ( self ) -> int: _A : Dict = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config(variance_type="""learned_range""" ) _A : Optional[Any] = scheduler_class(**_a ) _A : str = 0.5 assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.1712790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.7998052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.0010011 < 1e-5 def a__ ( self ) -> Any: _A : Any = self.scheduler_classes[0] _A : Tuple = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) _A : Optional[int] = scheduler.timesteps _A : Union[str, Any] = self.dummy_model() _A : Any = self.dummy_sample_deter _A : List[str] = torch.manual_seed(0 ) for i, t in enumerate(_a ): # 1. predict noise residual _A : Dict = model(_a , _a ) # 2. predict previous mean of sample x_t-1 _A : List[Any] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample _A : Any = pred_prev_sample _A : int = torch.sum(torch.abs(_a ) ) _A : Optional[int] = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 252.2682495 ) < 1e-2 assert abs(result_mean.item() - 0.3284743 ) < 1e-3 def a__ ( self ) -> Any: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config() _A : int = scheduler_class(**_a ) scheduler.set_timesteps(25 ) _A : List[str] = scheduler.timesteps _A : str = self.dummy_model() _A : Union[str, Any] = self.dummy_sample_deter _A : str = torch.manual_seed(0 ) for i, t in enumerate(_a ): # 1. predict noise residual _A : List[Any] = model(_a , _a ) if i + 1 == timesteps.shape[0]: _A : Dict = None else: _A : Optional[Any] = timesteps[i + 1] # 2. 
predict previous mean of sample x_t-1 _A : Any = scheduler.step( _a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample _A : int = pred_prev_sample _A : Optional[int] = torch.sum(torch.abs(_a ) ) _A : int = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 258.2044983 ) < 1e-2 assert abs(result_mean.item() - 0.3362038 ) < 1e-3 def a__ ( self ) -> Dict: pass def a__ ( self ) -> Optional[Any]: pass
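# A minimal sketch of the denoising loop the tests above exercise; the zero tensor is a
# stand-in for a real model output, and the constructor values mirror the test defaults.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps[:3]:
    model_output = torch.zeros_like(sample)  # stand-in for the UNet prediction
    sample = scheduler.step(model_output, t, sample, generator=torch.manual_seed(0)).prev_sample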
import torch


def lowerCAmelCase_():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    lowerCAmelCase_()
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase(unittest.TestCase):
    @slow
    def a__(self) -> Any:
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n" def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=8 ): _A : Tuple = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _A : Union[str, Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , ) -> List[str]: super().__init__() self.register_modules( unet=_a , scheduler=_a , movq=_a , ) _A : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> List[str]: if latents is None: _A : Union[str, Any] = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Tuple = latents.to(_a ) _A : Union[str, Any] = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> str: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : Optional[int] = torch.device(F'''cuda:{gpu_id}''' ) _A : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) def a__ ( self , _a=0 ) -> Any: if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=_a ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _A : Optional[Any] = None for cpu_offloaded_model in [self.unet, self.movq]: _A , _A : List[str] = cpu_offload_with_hook(_a , _a , prev_module_hook=_a ) # We'll offload the last model manually. 
_A : int = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def a__ ( self ) -> str: if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 100 , _a = 4.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , ) -> Optional[Any]: _A : str = self._execution_device _A : Optional[Any] = guidance_scale > 1.0 if isinstance(_a , _a ): _A : List[Any] = torch.cat(_a , dim=0 ) if isinstance(_a , _a ): _A : Optional[Any] = torch.cat(_a , dim=0 ) if isinstance(_a , _a ): _A : Union[str, Any] = torch.cat(_a , dim=0 ) _A : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: _A : str = image_embeds.repeat_interleave(_a , dim=0 ) _A : Tuple = negative_image_embeds.repeat_interleave(_a , dim=0 ) _A : Union[str, Any] = hint.repeat_interleave(_a , dim=0 ) _A : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_a ) _A : Optional[int] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_a ) self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[Any] = self.scheduler.timesteps _A : str = self.movq.config.latent_channels _A , _A : Dict = downscale_height_and_width(_a , _a , self.movq_scale_factor ) # create initial latent _A : Any = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = {"""image_embeds""": image_embeds, """hint""": hint} _A : List[Any] = self.unet( sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0] if do_classifier_free_guidance: _A , _A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) _A , _A : Optional[int] = noise_pred.chunk(2 ) _A , _A : Optional[Any] = variance_pred.chunk(2 ) _A : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _A : List[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _A , _A : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _A : List[Any] = self.scheduler.step( _a , _a , _a , generator=_a , )[0] # post-processing _A : Dict = self.movq.decode(_a , force_not_quantize=_a )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: _A : Any = image * 0.5 + 0.5 _A : int = image.clamp(0 , 1 ) _A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _A : Any = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
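# Worked example of the downscale_height_and_width helper defined above: the
# latent grid is height // scale_factor**2 cells per side (rounded up), then
# re-multiplied by scale_factor, so a 768x768 request maps to 96x96 latents
# and a 769x769 request rounds up to 104x104.
assert downscale_height_and_width(768, 768, 8) == (96, 96)
assert downscale_height_and_width(769, 769, 8) == (104, 104)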
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin _snake_case = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class lowercase : def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ) -> Optional[Any]: _A : str = d_model _A : Any = parent _A : List[str] = batch_size _A : Any = prediction_length _A : str = context_length _A : Any = cardinality _A : str = num_time_features _A : str = lags_sequence _A : List[Any] = embedding_dimension _A : int = is_training _A : Tuple = hidden_size _A : Any = num_hidden_layers _A : Optional[Any] = num_attention_heads _A : Tuple = intermediate_size _A : List[Any] = hidden_act _A : Tuple = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Any = context_length _A : str = prediction_length + label_length _A : int = label_length _A : List[str] = moving_average _A : Dict = autocorrelation_factor def a__ ( self ) -> List[str]: return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def a__ ( self , _a ) -> Optional[int]: _A : int = config.context_length + max(config.lags_sequence ) _A : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _A : List[str] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _A : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) _A : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _A : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _A : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] ) _A : str = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def a__ ( self ) -> Tuple: _A : List[Any] = self.get_config() _A : int = self.prepare_autoformer_inputs_dict(_a ) return config, inputs_dict def a__ ( self ) -> Optional[int]: _A , _A : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def a__ ( self , _a , _a ) -> 
Optional[Any]: _A : Dict = AutoformerModel(config=_a ).to(_a ).eval() _A : int = model(**_a ) _A : str = outputs.encoder_last_hidden_state _A : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _A : str = model.get_encoder() encoder.save_pretrained(_a ) _A : Optional[Any] = AutoformerEncoder.from_pretrained(_a ).to(_a ) _A , _A , _A , _A , _A : Optional[int] = model.create_network_inputs(**_a ) _A , _A : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _A : Union[str, Any] = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _A : str = encoder(inputs_embeds=_a )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _A : str = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _A : Optional[int] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _A : Tuple = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _A : int = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _A : Tuple = model.get_decoder() decoder.save_pretrained(_a ) _A : Tuple = AutoformerDecoder.from_pretrained(_a ).to(_a ) _A : List[Any] = decoder( trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _a = (AutoformerForPrediction,) if is_torch_available() else () _a = {"feature-extraction": AutoformerModel} if is_torch_available() else {} _a = False _a = False _a = False _a = False _a = False _a = False def a__ ( self ) -> Dict: _A : Optional[int] = AutoformerModelTester(self ) _A : Union[str, Any] = ConfigTester(self , config_class=_a , has_text_modality=_a ) def a__ ( self ) -> int: self.config_tester.run_common_tests() def a__ ( self ) -> Optional[int]: _A , _A : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _A : Dict = model_class(_a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_a ) _A , _A : Any = model_class.from_pretrained(_a , output_loading_info=_a ) self.assertEqual(info["""missing_keys"""] , [] ) def a__ ( self ) -> str: _A : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_a ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> str: _A : Union[str, Any] = inspect.signature(getattr(_a , """forward""" ) ) # The main input is the name of the argument after `self` _A : Union[str, Any] = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , _a ) def a__ ( self ) -> List[Any]: _A , _A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : str = model_class(_a ) _A : List[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : List[str] = [*signature.parameters.keys()] _A : Tuple = [ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(_a )] , _a ) def a__ ( self ) -> Optional[Any]: _A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = True _A : str = getattr(self.model_tester , """seq_length""" , _a ) _A : Dict = getattr(self.model_tester , """decoder_seq_length""" , _a ) _A : str = getattr(self.model_tester , """encoder_seq_length""" , _a ) _A : List[Any] = getattr(self.model_tester , """d_model""" , _a ) _A : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , _a ) _A : List[str] = d_model // num_attention_heads for model_class in self.all_model_classes: _A : Optional[Any] = True _A : List[str] = False _A : Optional[int] = True _A : Union[str, Any] = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _A : List[Any] = model(**self._prepare_for_class(_a , _a ) ) _A : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _A : Dict = True _A : List[Any] = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _A : int = model(**self._prepare_for_class(_a , _a ) ) _A : Tuple = outputs.encoder_attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _A : List[str] = len(_a ) _A : int = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(_a , _a ) # decoder attentions _A : Dict = outputs.decoder_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _A : Optional[Any] = outputs.cross_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _A : Dict = True _A : Any = True _A : str = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _A : List[str] = model(**self._prepare_for_class(_a , _a ) ) self.assertEqual(out_len + 2 , len(_a ) ) _A : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def a__ ( self ) -> int: super().test_retain_grad_hidden_states_attentions() def lowerCAmelCase_ ( snake_case_="train-batch.pt" ): _A : Optional[int] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""",filename=snake_case_,repo_type="""dataset""" ) _A : List[str] = torch.load(snake_case_,map_location=snake_case_ ) return batch @require_torch @slow class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: _A : Optional[int] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) _A : Any = prepare_batch() with torch.no_grad(): _A : Union[str, Any] = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] _A : List[Any] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , _a ) _A : str = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def a__ ( self ) -> Optional[Any]: _A : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) _A : Optional[Any] = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _A : List[str] = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state _A : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , _a ) _A : Tuple = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def a__ ( self ) -> List[str]: _A : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) _A : Optional[int] = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _A : str = model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) _A : str = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , _a ) _A : int = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_a ) _A : Dict = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1e-1 ) )
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
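# Worked example of the ellipsoidal distance above (coordinates chosen for
# illustration: San Francisco and Yosemite). The result is in metres and comes
# out to roughly 254 km for this pair.
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))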
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowercase ( unittest.TestCase ): _a = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def a__ ( self , _a , _a , _a ) -> Any: _A : List[Any] = hf_hub_download( repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) _A : Tuple = VideoClassificationPipeline(model=_a , image_processor=_a , top_k=2 ) _A : Optional[Any] = [ example_video_filepath, """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""", ] return video_classifier, examples def a__ ( self , _a , _a ) -> Any: for example in examples: _A : Optional[int] = video_classifier(_a ) self.assertEqual( _a , [ {"""score""": ANY(_a ), """label""": ANY(_a )}, {"""score""": ANY(_a ), """label""": ANY(_a )}, ] , ) @require_torch def a__ ( self ) -> str: _A : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification""" _A : Tuple = VideoMAEFeatureExtractor( size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} ) _A : Any = pipeline( """video-classification""" , model=_a , feature_extractor=_a , frame_sampling_rate=4 ) _A : Optional[int] = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) _A : Any = video_classifier(_a , top_k=2 ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , ) _A : Optional[int] = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], ] , ) @require_tf def a__ ( self ) -> str: pass
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
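# Minimal usage sketch (the zero "score" below is a stand-in for a real score
# network's prediction): one reverse-diffusion Euler-Maruyama step with the
# variance-preserving SDE scheduler defined above.
def _demo_sde_vp_step():
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    t = scheduler.timesteps[0]
    score = torch.zeros_like(sample)  # a trained model would predict this
    sample, sample_mean = scheduler.step_pred(score, sample, t)
    return sample, sample_mean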
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is a mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
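# Example invocation of the conversion script above. The filename below is an
# assumption (not defined in this file) and the checkpoint path is hypothetical:
#
#   python convert_mbart_original_checkpoint_to_pytorch.py ./model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned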
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
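# Usage sketch for the fast tokenizer above (the checkpoint name is taken from
# the vocab map in this file; network access is needed to fetch it). FNet has
# no attention mask, so only input_ids and token_type_ids come back, matching
# model_input_names.
def _demo_fnet_tokenizer():
    from transformers import FNetTokenizerFast

    tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
    encoded = tokenizer("Hello world", return_tensors="pt")
    return sorted(encoded.keys())  # ['input_ids', 'token_type_ids']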
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _snake_case = logging.get_logger(__name__) _snake_case = { "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json", # See all DPT models at https://huggingface.co/models?filter=dpt } class lowercase ( UpperCamelCase__ ): _a = "dpt" def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=384 , _a=16 , _a=3 , _a=False , _a=True , _a=[2, 5, 8, 11] , _a="project" , _a=[4, 2, 1, 0.5] , _a=[96, 192, 384, 768] , _a=256 , _a=-1 , _a=False , _a=True , _a=0.4 , _a=255 , _a=0.1 , _a=[1, 1024, 24, 24] , _a=[0, 1] , _a=None , **_a , ) -> Optional[Any]: super().__init__(**_a ) _A : Any = hidden_size _A : Optional[int] = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("""Initializing the config with a `BiT` backbone.""" ) _A : Union[str, Any] = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, } _A : Union[str, Any] = BitConfig(**_a ) elif isinstance(_a , _a ): logger.info("""Initializing the config with a `BiT` backbone.""" ) _A : Union[str, Any] = BitConfig(**_a ) elif isinstance(_a , _a ): _A : Optional[Any] = backbone_config else: raise ValueError( F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) _A : Any = backbone_featmap_shape _A : Optional[Any] = neck_ignore_stages if readout_type != "project": raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" ) else: _A : Tuple = None _A : Union[str, Any] = None _A : Dict = [] _A : Union[str, Any] = num_hidden_layers _A : List[str] = num_attention_heads _A : Union[str, Any] = intermediate_size _A : Tuple = hidden_act _A : int = hidden_dropout_prob _A : Tuple = attention_probs_dropout_prob _A : List[str] = initializer_range _A : List[Any] = layer_norm_eps _A : str = image_size _A : str = patch_size _A : List[str] = num_channels _A : int = qkv_bias _A : Dict = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" ) _A : int = readout_type _A : List[str] = reassemble_factors _A : str = neck_hidden_sizes _A : Union[str, Any] = fusion_hidden_size _A : List[Any] = head_in_index _A : List[Any] = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _A : Tuple = use_auxiliary_head _A : Union[str, Any] = auxiliary_loss_weight _A : Tuple = semantic_loss_ignore_index _A : List[Any] = semantic_classifier_dropout def a__ ( self ) -> Union[str, Any]: _A : str = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _A : Tuple = self.backbone_config.to_dict() _A : Tuple = self.__class__.model_type return output
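# Minimal usage sketch of the configuration class above (it ships in
# transformers as DPTConfig): building a hybrid config registers a BiT
# backbone with the defaults shown earlier.
def _demo_dpt_config():
    from transformers import DPTConfig

    config = DPTConfig(is_hybrid=True)
    assert config.backbone_config.layer_type == "bottleneck"
    assert config.readout_type == "project"
    return config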
26
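Upstream in transformers the configuration class above is DPTConfig. A usage sketch assuming that name; it builds a randomly initialized model, so nothing is downloaded:

from transformers import DPTConfig, DPTModel

config = DPTConfig()                 # ViT-style defaults as defined above
model = DPTModel(config)             # random weights, no checkpoint needed
print(config.readout_type)           # "project"
print(config.backbone_out_indices)   # [2, 5, 8, 11]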
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points in degrees."""
    # Constants per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Convert geodetic latitudes to reduced (parametric) latitudes
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
1
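A quick sanity check for haversine_distance as fixed above; coordinates are decimal degrees, the result is metres:

SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
distance = haversine_distance(*SAN_FRANCISCO, *NEW_YORK)
print(f"{distance / 1000:.0f} km")  # roughly 4100 km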
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def lowerCAmelCase_ ( snake_case_=None,snake_case_=None ): return field(default_factory=lambda: default,metadata=snake_case_ ) @dataclass class lowercase : _a = field( metadata={"help": "The csv file to plot."},) _a = field( default=UpperCamelCase__,metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},) _a = field( default=UpperCamelCase__,metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},) _a = field( default=UpperCamelCase__,metadata={"help": "Disable logarithmic scale when plotting"},) _a = field( default=UpperCamelCase__,metadata={ "help": "Whether the csv file has training results or inference results. Defaults to inference results." },) _a = field( default=UpperCamelCase__,metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},) _a = list_field( default=UpperCamelCase__,metadata={"help": "List of model names that are used instead of the ones in the csv file."} ) def lowerCAmelCase_ ( snake_case_ ): try: int(snake_case_ ) return True except ValueError: return False def lowerCAmelCase_ ( snake_case_ ): try: float(snake_case_ ) return True except ValueError: return False class lowercase : def __init__( self , _a ) -> Optional[Any]: _A : int = args _A : Optional[Any] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _A : List[str] = csv.DictReader(_a ) for row in reader: _A : Optional[Any] = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _A : Union[str, Any] = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _A : int = float(row["""result"""] ) def a__ ( self ) -> Any: _A , _A : Tuple = plt.subplots() _A : Union[str, Any] = """Time usage""" if self.args.is_time else """Memory usage""" _A : List[Any] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _A : Optional[Any] = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _A : List[Any] = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _A : List[str] = self.result_dict[model_name]["""result"""] ((_A) , (_A)) : Dict = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _A : Union[str, Any] = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _A : int = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_a , ) else: _A : int = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_A) , (_A)) : Any = ( ("""batch_size""", """len""") 
if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _A : List[Any] = np.asarray(_a , _a )[: len(_a )] plt.scatter( _a , _a , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(_a , _a , """--""" ) title_str += F''' {label_model_name} vs.''' _A : Optional[Any] = title_str[:-4] _A : Optional[int] = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(_a ) plt.xlabel(_a ) plt.ylabel(_a ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def lowerCAmelCase_ ( ): _A : List[Any] = HfArgumentParser(snake_case_ ) _A : Tuple = parser.parse_args_into_dataclasses()[0] _A : Dict = Plot(args=snake_case_ ) plot.plot() if __name__ == "__main__": main()
26
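The DictReader loop above looks up the columns model, batch_size, sequence_length and result. A hypothetical input file in that shape, plus an assumed invocation (the script filename is not given in the source):

csv_text = """model,batch_size,sequence_length,result
bert-base-cased,8,128,0.018
bert-base-cased,8,512,0.088
bert-base-cased,32,128,0.061
"""
with open("results.csv", "w") as f:
    f.write(csv_text)
# then, assuming the script is saved as plot_csv_file.py:
#   python plot_csv_file.py --csv_file results.csv --is_time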
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
26
1
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _snake_case = "CompVis/stable-diffusion-v1-1" _snake_case = "CompVis/stable-diffusion-v1-2" _snake_case = "CompVis/stable-diffusion-v1-3" _snake_case = "CompVis/stable-diffusion-v1-4" class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a = True , ) -> Dict: super()._init_() _A : Optional[Any] = StableDiffusionPipeline.from_pretrained(_a ) _A : List[str] = StableDiffusionPipeline.from_pretrained(_a ) _A : Dict = StableDiffusionPipeline.from_pretrained(_a ) _A : Union[str, Any] = StableDiffusionPipeline( vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , requires_safety_checker=_a , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def a__ ( self ) -> Dict[str, Any]: return {k: getattr(self , _a ) for k in self.config.keys() if not k.startswith("""_""" )} def a__ ( self , _a = "auto" ) -> Dict: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _A : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_a ) def a__ ( self ) -> Any: self.enable_attention_slicing(_a ) @torch.no_grad() def a__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> List[str]: return self.pipea( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , ) @torch.no_grad() def a__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> str: return self.pipea( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , ) @torch.no_grad() def a__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> int: return self.pipea( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , ) @torch.no_grad() def a__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> List[Any]: return self.pipea( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , 
callback_steps=_a , **_a , ) @torch.no_grad() def a__ ( self , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> Union[str, Any]: _A : List[Any] = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(_a ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 _A : Dict = self.textaimg_sda_a( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , ) # Get first result from Stable Diffusion Checkpoint v1.2 _A : Optional[int] = self.textaimg_sda_a( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , ) # Get first result from Stable Diffusion Checkpoint v1.3 _A : Dict = self.textaimg_sda_a( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , ) # Get first result from Stable Diffusion Checkpoint v1.4 _A : List[str] = self.textaimg_sda_a( prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
26
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
26
1
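Upstream this configuration class is XmodConfig (model_type "xmod"). A minimal sketch under that assumption, exercising the per-language adapter fields stored by the constructor:

from transformers import XmodConfig

config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.model_type)  # "xmod"
print(config.languages)   # ["en_XX", "de_DE"]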
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube. round() avoids float error:
    e.g. 64 ** (1 / 3) == 3.9999999999999996, so a raw comparison fails."""
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
26
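A few spot checks for perfect_cube, including 64, which the unrounded float comparison got wrong:

for n in (1, 8, 26, 27, 64, 1000):
    print(n, perfect_cube(n))
# 1 True, 8 True, 26 False, 27 True, 64 True, 1000 True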
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursion over all first-cut lengths."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev)
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Tabulated (bottom-up) dynamic programming."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
26
1
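All three implementations above agree on the classic CLRS price table, where the optimum for a rod of length 8 is 22 (cut into pieces of length 2 and 6):

prices = [1, 5, 8, 9, 10, 17, 17, 20]  # price of pieces of length 1..8
n = len(prices)
print(naive_cut_rod_recursive(n, prices))  # 22
print(top_down_cut_rod(n, prices))         # 22
print(bottom_up_cut_rod(n, prices))        # 22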
import collections import os import re from pathlib import Path _snake_case = "src/transformers" # Matches is_xxx_available() _snake_case = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} _snake_case = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _snake_case = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available _snake_case = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") _snake_case = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _snake_case = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", _snake_case = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], _snake_case = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo _snake_case = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: _snake_case = re.compile(r"^\s*try:") # Catches a line with else: _snake_case = re.compile(r"^\s*else:") def lowerCAmelCase_ ( snake_case_ ): if _re_test_backend.search(snake_case_ ) is None: return None _A : Any = [b[0] for b in _re_backend.findall(snake_case_ )] backends.sort() return "_and_".join(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): with open(snake_case_,"""r""",encoding="""utf-8""",newline="""\n""" ) as f: _A : Dict = f.readlines() _A : Union[str, Any] = 0 while line_index < len(snake_case_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(snake_case_ ): return None # First grab the objects without a specific backend in _import_structure _A : List[Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: _A : Dict = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(snake_case_ ): _A : Dict = _re_one_line_import_struct.search(snake_case_ ).groups()[0] _A : Any = re.findall(r"""\[([^\]]+)\]""",snake_case_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue _A : Optional[int] = _re_import_struct_key_value.search(snake_case_ ) if single_line_import_search is not None: _A : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(snake_case_ ) > 0] objects.extend(snake_case_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 _A : Any = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A : Tuple = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): _A : List[str] = lines[line_index] if _re_import_struct_add_one.search(snake_case_ ) is not None: objects.append(_re_import_struct_add_one.search(snake_case_ ).groups()[0] ) elif _re_import_struct_add_many.search(snake_case_ ) is not None: _A : str = _re_import_struct_add_many.search(snake_case_ ).groups()[0].split(""", """ ) _A : str = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0] objects.extend(snake_case_ ) elif _re_between_brackets.search(snake_case_ ) is not None: _A : Dict = _re_between_brackets.search(snake_case_ ).groups()[0].split(""", """ ) _A : Dict = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0] objects.extend(snake_case_ ) elif _re_quote_object.search(snake_case_ ) is not None: objects.append(_re_quote_object.search(snake_case_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 _A : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A : str = [] while ( line_index < len(snake_case_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): _A : Dict = lines[line_index] _A : List[Any] = _re_import.search(snake_case_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 _A : Optional[int] = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(snake_case_ ): # If the line is an if is_backend_available, we grab all objects associated. 
_A : Union[str, Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): _A : Optional[int] = lines[line_index] _A : str = _re_import.search(snake_case_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 _A : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def lowerCAmelCase_ ( snake_case_,snake_case_ ): def find_duplicates(snake_case_ ): return [k for k, v in collections.Counter(snake_case_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A : Tuple = [] for key in import_dict_objects.keys(): _A : List[str] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A : int = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A : int = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def lowerCAmelCase_ ( ): _A : str = [] for root, _, files in os.walk(snake_case_ ): if "__init__.py" in files: _A : str = os.path.join(snake_case_,"""__init__.py""" ) _A : Any = parse_init(snake_case_ ) if objects is not None: _A : Optional[int] = analyze_results(*snake_case_ ) if len(snake_case_ ) > 0: _A : Optional[Any] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(snake_case_ ) ) if len(snake_case_ ) > 0: raise ValueError("""\n\n""".join(snake_case_ ) ) def lowerCAmelCase_ ( ): _A : Optional[Any] = [] for path, directories, files in os.walk(snake_case_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(snake_case_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(snake_case_ ) / folder).glob("""*.py""" ) ) ) == 0: continue _A : Optional[int] = str((Path(snake_case_ ) / folder).relative_to(snake_case_ ) ) _A : Optional[int] = short_path.replace(os.path.sep,""".""" ) submodules.append(snake_case_ ) for fname in files: if fname == "__init__.py": continue _A : Optional[Any] = str((Path(snake_case_ ) / fname).relative_to(snake_case_ ) ) _A : Dict = short_path.replace(""".py""","""""" ).replace(os.path.sep,""".""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(snake_case_ ) return submodules _snake_case = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", 
"models.esm.openfold_utils", ] def lowerCAmelCase_ ( ): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import _A : List[Any] = direct_transformers_import(snake_case_ ) _A : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(snake_case_,"""__init__.py""" ),"""r""" ) as f: _A : str = f.read() import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""",snake_case_ ) ) ) _A : str = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(snake_case_ ) > 0: _A : Union[str, Any] = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
26
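The invariant the checker above enforces: every object listed in _import_structure must reappear in the TYPE_CHECKING branch, per backend. A minimal __init__.py that passes; configuration_foo/FooConfig are hypothetical names, and _LazyModule is the lazy loader from transformers.utils:

from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"configuration_foo": ["FooConfig"]}

if TYPE_CHECKING:
    from .configuration_foo import FooConfig
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)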
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Yahoo's generated CSS class for the quote widget; brittle and may change.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
26
1
def is_unique(input_str: str) -> bool:
    """Check that no character occurs twice, using an integer as a bit set."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code point
        if (bitmap >> ch_unicode) & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
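The bit-set check above in action; an empty string is vacuously unique:

print(is_unique("abcdef"))  # True
print(is_unique("banana"))  # False
print(is_unique(""))        # True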
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
26
1
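The same tiny checkpoint the test exercises can be driven through the pipeline API directly; the untrained model's text is meaningless, only the call and return shapes matter:

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))
# [{'generated_text': '...'}]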
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
26
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
26
1
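Both variants agree, e.g. gcd(48, 18) = 6:

print(euclidean_gcd(48, 18))            # 6
print(euclidean_gcd_recursive(48, 18))  # 6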
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowercase ( UpperCamelCase__ ): def __get__( self , _a , _a=None ) -> Optional[int]: # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("""unreadable attribute""" ) _A : List[str] = """__cached_""" + self.fget.__name__ _A : Any = getattr(_a , _a , _a ) if cached is None: _A : Dict = self.fget(_a ) setattr(_a , _a , _a ) return cached def lowerCAmelCase_ ( snake_case_ ): _A : Union[str, Any] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def lowerCAmelCase_ ( snake_case_ ): if is_torch_fx_proxy(snake_case_ ): return True if is_torch_available(): import torch if isinstance(snake_case_,torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(snake_case_,tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(snake_case_,(jnp.ndarray, Tracer) ): return True return isinstance(snake_case_,np.ndarray ) def lowerCAmelCase_ ( snake_case_ ): return isinstance(snake_case_,np.ndarray ) def lowerCAmelCase_ ( snake_case_ ): return _is_numpy(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): import torch return isinstance(snake_case_,torch.Tensor ) def lowerCAmelCase_ ( snake_case_ ): return False if not is_torch_available() else _is_torch(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): import torch return isinstance(snake_case_,torch.device ) def lowerCAmelCase_ ( snake_case_ ): return False if not is_torch_available() else _is_torch_device(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): import torch if isinstance(snake_case_,snake_case_ ): if hasattr(snake_case_,snake_case_ ): _A : Optional[int] = getattr(snake_case_,snake_case_ ) else: return False return isinstance(snake_case_,torch.dtype ) def lowerCAmelCase_ ( snake_case_ ): return False if not is_torch_available() else _is_torch_dtype(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): import tensorflow as tf return isinstance(snake_case_,tf.Tensor ) def lowerCAmelCase_ ( snake_case_ ): return False if not is_tf_available() else _is_tensorflow(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(snake_case_,"""is_symbolic_tensor""" ): return tf.is_symbolic_tensor(snake_case_ ) return type(snake_case_ ) == tf.Tensor def lowerCAmelCase_ ( snake_case_ ): return False if not is_tf_available() else _is_tf_symbolic_tensor(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): import jax.numpy as jnp # noqa: F811 return isinstance(snake_case_,jnp.ndarray ) def lowerCAmelCase_ ( snake_case_ ): return False if not is_flax_available() else _is_jax(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): if isinstance(snake_case_,(dict, UserDict) ): return {k: to_py_obj(snake_case_ ) for k, v in obj.items()} elif isinstance(snake_case_,(list, tuple) ): return [to_py_obj(snake_case_ ) for o in obj] elif 
is_tf_tensor(snake_case_ ): return obj.numpy().tolist() elif is_torch_tensor(snake_case_ ): return obj.detach().cpu().tolist() elif is_jax_tensor(snake_case_ ): return np.asarray(snake_case_ ).tolist() elif isinstance(snake_case_,(np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def lowerCAmelCase_ ( snake_case_ ): if isinstance(snake_case_,(dict, UserDict) ): return {k: to_numpy(snake_case_ ) for k, v in obj.items()} elif isinstance(snake_case_,(list, tuple) ): return np.array(snake_case_ ) elif is_tf_tensor(snake_case_ ): return obj.numpy() elif is_torch_tensor(snake_case_ ): return obj.detach().cpu().numpy() elif is_jax_tensor(snake_case_ ): return np.asarray(snake_case_ ) else: return obj class lowercase ( UpperCamelCase__ ): def a__ ( self ) -> str: _A : int = fields(self ) # Safety and consistency checks if not len(_a ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) _A : str = getattr(self , class_fields[0].name ) _A : str = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(_a ): if isinstance(_a , _a ): _A : int = first_field.items() _A : Dict = True else: try: _A : List[Any] = iter(_a ) _A : List[Any] = True except TypeError: _A : Dict = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_a ): if ( not isinstance(_a , (list, tuple) ) or not len(_a ) == 2 or not isinstance(element[0] , _a ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute _A : Dict = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: _A : Any = element[1] elif first_field is not None: _A : List[str] = first_field else: for field in class_fields: _A : Any = getattr(self , field.name ) if v is not None: _A : int = v def __delitem__( self , *_a , **_a ) -> int: raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def a__ ( self , *_a , **_a ) -> Dict: raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def a__ ( self , *_a , **_a ) -> Optional[Any]: raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def a__ ( self , *_a , **_a ) -> List[Any]: raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self , _a ) -> Optional[int]: if isinstance(_a , _a ): _A : str = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , _a , _a ) -> List[Any]: if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_a , _a ) super().__setattr__(_a , _a ) def __setitem__( self , _a , _a ) -> int: # Will raise a KeyException if needed super().__setitem__(_a , _a ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_a , _a ) def a__ ( self ) -> Tuple[Any]: return tuple(self[k] for k in self.keys() ) class lowercase ( UpperCamelCase__,UpperCamelCase__ ): @classmethod def a__ ( cls , _a ) -> Any: raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class lowercase ( UpperCamelCase__ ): _a = "longest" _a = "max_length" _a = "do_not_pad" class lowercase ( UpperCamelCase__ ): _a = "pt" _a = "tf" _a = "np" _a = "jax" class lowercase : def __init__( self , _a ) -> Tuple: _A : List[Any] = context_managers _A : List[Any] = ExitStack() def __enter__( self ) -> List[str]: for context_manager in self.context_managers: self.stack.enter_context(_a ) def __exit__( self , *_a , **_a ) -> Union[str, Any]: self.stack.__exit__(*_a , **_a ) def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = infer_framework(snake_case_ ) if framework == "tf": _A : str = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _A : Union[str, Any] = inspect.signature(model_class.forward ) # PyTorch models else: _A : int = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def lowerCAmelCase_ ( snake_case_ ): _A : Dict = model_class.__name__ _A : Optional[Any] = infer_framework(snake_case_ ) if framework == "tf": _A : Any = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _A : Optional[Any] = inspect.signature(model_class.forward ) # PyTorch models else: _A : Optional[Any] = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def lowerCAmelCase_ ( snake_case_,snake_case_ = "",snake_case_ = "." ): def _flatten_dict(snake_case_,snake_case_="",snake_case_="." 
): for k, v in d.items(): _A : Dict = str(snake_case_ ) + delimiter + str(snake_case_ ) if parent_key else k if v and isinstance(snake_case_,snake_case_ ): yield from flatten_dict(snake_case_,snake_case_,delimiter=snake_case_ ).items() else: yield key, v return dict(_flatten_dict(snake_case_,snake_case_,snake_case_ ) ) @contextmanager def lowerCAmelCase_ ( snake_case_,snake_case_ = False ): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def lowerCAmelCase_ ( snake_case_,snake_case_=None ): if is_numpy_array(snake_case_ ): return np.transpose(snake_case_,axes=snake_case_ ) elif is_torch_tensor(snake_case_ ): return array.T if axes is None else array.permute(*snake_case_ ) elif is_tf_tensor(snake_case_ ): import tensorflow as tf return tf.transpose(snake_case_,perm=snake_case_ ) elif is_jax_tensor(snake_case_ ): return jnp.transpose(snake_case_,axes=snake_case_ ) else: raise ValueError(f'''Type not supported for transpose: {type(snake_case_ )}.''' ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): if is_numpy_array(snake_case_ ): return np.reshape(snake_case_,snake_case_ ) elif is_torch_tensor(snake_case_ ): return array.reshape(*snake_case_ ) elif is_tf_tensor(snake_case_ ): import tensorflow as tf return tf.reshape(snake_case_,snake_case_ ) elif is_jax_tensor(snake_case_ ): return jnp.reshape(snake_case_,snake_case_ ) else: raise ValueError(f'''Type not supported for reshape: {type(snake_case_ )}.''' ) def lowerCAmelCase_ ( snake_case_,snake_case_=None ): if is_numpy_array(snake_case_ ): return np.squeeze(snake_case_,axis=snake_case_ ) elif is_torch_tensor(snake_case_ ): return array.squeeze() if axis is None else array.squeeze(dim=snake_case_ ) elif is_tf_tensor(snake_case_ ): import tensorflow as tf return tf.squeeze(snake_case_,axis=snake_case_ ) elif is_jax_tensor(snake_case_ ): return jnp.squeeze(snake_case_,axis=snake_case_ ) else: raise ValueError(f'''Type not supported for squeeze: {type(snake_case_ )}.''' ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): if is_numpy_array(snake_case_ ): return np.expand_dims(snake_case_,snake_case_ ) elif is_torch_tensor(snake_case_ ): return array.unsqueeze(dim=snake_case_ ) elif is_tf_tensor(snake_case_ ): import tensorflow as tf return tf.expand_dims(snake_case_,axis=snake_case_ ) elif is_jax_tensor(snake_case_ ): return jnp.expand_dims(snake_case_,axis=snake_case_ ) else: raise ValueError(f'''Type not supported for expand_dims: {type(snake_case_ )}.''' ) def lowerCAmelCase_ ( snake_case_ ): if is_numpy_array(snake_case_ ): return np.size(snake_case_ ) elif is_torch_tensor(snake_case_ ): return array.numel() elif is_tf_tensor(snake_case_ ): import tensorflow as tf return tf.size(snake_case_ ) elif is_jax_tensor(snake_case_ ): return array.size else: raise ValueError(f'''Type not supported for expand_dims: {type(snake_case_ )}.''' ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): for key, value in auto_map.items(): if isinstance(snake_case_,(tuple, list) ): _A : int = [f'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value] elif value is not None and "--" not in value: _A : List[str] = f'''{repo_id}--{value}''' return auto_map def lowerCAmelCase_ ( snake_case_ ): for base_class in inspect.getmro(snake_case_ ): _A : Optional[Any] = base_class.__module__ _A : Tuple = base_class.__name__ if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("""torch""" ) or name == 
"PreTrainedModel": return "pt" elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''' )
26
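Upstream these framework-agnostic helpers live in transformers.utils.generic under the names transpose, reshape, squeeze, expand_dims and tensor_size. A sketch of the NumPy dispatch path under that assumption:

import numpy as np

from transformers.utils.generic import expand_dims, reshape, squeeze, transpose

arr = np.arange(6).reshape(2, 3)
print(transpose(arr).shape)                # (3, 2)
print(reshape(arr, (3, 2)).shape)          # (3, 2)
print(expand_dims(arr, 0).shape)           # (1, 2, 3)
print(squeeze(expand_dims(arr, 0)).shape)  # (2, 3)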
def is_power_of_two(number: int) -> bool:
    """Bit trick: a power of two has exactly one bit set, so n & (n - 1) == 0.
    Note that 0 also passes this check."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
1
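The n & (n - 1) trick in action; note that 0 also passes the check, since 0 & -1 == 0:

for n in (0, 1, 2, 3, 16, 17):
    print(n, is_power_of_two(n))
# 0 True, 1 True, 2 True, 3 False, 16 True, 17 False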
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( nn.Module ): def __init__( self , _a=3 , _a=3 , _a=("DownEncoderBlock2D",) , _a=(64,) , _a=2 , _a=32 , _a="silu" , _a=True , ) -> Any: super().__init__() _A : List[str] = layers_per_block _A : Optional[Any] = torch.nn.Convad( _a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) _A : str = None _A : int = nn.ModuleList([] ) # down _A : Tuple = block_out_channels[0] for i, down_block_type in enumerate(_a ): _A : int = output_channel _A : List[str] = block_out_channels[i] _A : Optional[Any] = i == len(_a ) - 1 _A : Tuple = get_down_block( _a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , ) self.down_blocks.append(_a ) # mid _A : List[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , ) # out _A : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6 ) _A : Optional[Any] = nn.SiLU() _A : List[str] = 2 * out_channels if double_z else out_channels _A : List[Any] = nn.Convad(block_out_channels[-1] , _a , 3 , padding=1 ) _A : Any = False def a__ ( self , _a ) -> List[str]: _A : Any = x _A : Optional[Any] = self.conv_in(_a ) if self.training and self.gradient_checkpointing: def create_custom_forward(_a ): def custom_forward(*_a ): return module(*_a ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: _A : int = torch.utils.checkpoint.checkpoint( create_custom_forward(_a ) , _a , use_reentrant=_a ) # middle _A : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , use_reentrant=_a ) else: for down_block in self.down_blocks: _A : str = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a ) # middle _A : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _a ) else: # down for down_block in self.down_blocks: _A : List[Any] = down_block(_a ) # middle _A : Dict = self.mid_block(_a ) # post-process _A : Union[str, Any] = self.conv_norm_out(_a ) _A : List[str] = self.conv_act(_a ) _A : str = self.conv_out(_a ) return sample class lowercase ( nn.Module ): def __init__( self , _a=3 , _a=3 , _a=("UpDecoderBlock2D",) , _a=(64,) , _a=2 , _a=32 , _a="silu" , _a="group" , ) -> Optional[int]: super().__init__() _A : Tuple = layers_per_block _A : Any = nn.Convad( _a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) _A : Optional[Any] = None _A : str = nn.ModuleList([] ) _A : Optional[int] = in_channels if norm_type == """spatial""" else None # mid _A : List[str] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , ) # up _A : List[str] = list(reversed(_a ) ) _A 
: List[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(_a ): _A : Any = output_channel _A : List[str] = reversed_block_out_channels[i] _A : Optional[Any] = i == len(_a ) - 1 _A : List[str] = get_up_block( _a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , ) self.up_blocks.append(_a ) _A : Optional[int] = output_channel # out if norm_type == "spatial": _A : Dict = SpatialNorm(block_out_channels[0] , _a ) else: _A : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6 ) _A : List[str] = nn.SiLU() _A : str = nn.Convad(block_out_channels[0] , _a , 3 , padding=1 ) _A : int = False def a__ ( self , _a , _a=None ) -> str: _A : Union[str, Any] = z _A : Dict = self.conv_in(_a ) _A : str = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(_a ): def custom_forward(*_a ): return module(*_a ) return custom_forward if is_torch_version(""">=""" , """1.11.0""" ): # middle _A : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , _a , use_reentrant=_a ) _A : Dict = sample.to(_a ) # up for up_block in self.up_blocks: _A : Tuple = torch.utils.checkpoint.checkpoint( create_custom_forward(_a ) , _a , _a , use_reentrant=_a ) else: # middle _A : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _a , _a ) _A : Optional[int] = sample.to(_a ) # up for up_block in self.up_blocks: _A : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a , _a ) else: # middle _A : Tuple = self.mid_block(_a , _a ) _A : Dict = sample.to(_a ) # up for up_block in self.up_blocks: _A : Dict = up_block(_a , _a ) # post-process if latent_embeds is None: _A : List[Any] = self.conv_norm_out(_a ) else: _A : int = self.conv_norm_out(_a , _a ) _A : Tuple = self.conv_act(_a ) _A : Tuple = self.conv_out(_a ) return sample class lowercase ( nn.Module ): def __init__( self , _a , _a , _a , _a=None , _a="random" , _a=False , _a=True ) -> Union[str, Any]: super().__init__() _A : List[str] = n_e _A : List[str] = vq_embed_dim _A : int = beta _A : Optional[Any] = legacy _A : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) _A : Tuple = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) _A : Optional[Any] = self.used.shape[0] _A : Dict = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": _A : List[str] = self.re_embed _A : str = self.re_embed + 1 print( F'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' F'''Using {self.unknown_index} for unknown indices.''' ) else: _A : Optional[Any] = n_e _A : str = sane_index_shape def a__ ( self , _a ) -> List[str]: _A : Union[str, Any] = inds.shape assert len(_a ) > 1 _A : Dict = inds.reshape(ishape[0] , -1 ) _A : Any = self.used.to(_a ) _A : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() _A : Optional[int] = match.argmax(-1 ) _A : Dict = match.sum(2 ) < 1 if self.unknown_index == "random": _A : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: _A : Tuple = self.unknown_index return new.reshape(_a ) def a__ ( self , _a ) -> Optional[int]: _A : Dict = inds.shape assert len(_a ) > 1 _A : Union[str, Any] = inds.reshape(ishape[0] , -1 ) _A : List[Any] = self.used.to(_a ) if self.re_embed > self.used.shape[0]: # extra token _A : Union[str, Any] = 0 # simply set to zero _A : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a ) return back.reshape(_a ) def a__ ( self , _a ) -> str: # reshape z -> (batch, height, width, channel) and flatten _A : Dict = z.permute(0 , 2 , 3 , 1 ).contiguous() _A : List[str] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z _A : Tuple = torch.argmin(torch.cdist(_a , self.embedding.weight ) , dim=1 ) _A : List[Any] = self.embedding(_a ).view(z.shape ) _A : Optional[int] = None _A : int = None # compute loss for embedding if not self.legacy: _A : Any = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: _A : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients _A : int = z + (z_q - z).detach() # reshape back to match original input shape _A : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: _A : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis _A : List[Any] = self.remap_to_used(_a ) _A : str = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: _A : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def a__ ( self , _a , _a ) -> Dict: # shape specifying (batch, height, width, channel) if self.remap is not None: _A : Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis _A : str = self.unmap_to_all(_a ) _A : Optional[Any] = indices.reshape(-1 ) # flatten again # get quantized latent vectors _A : Optional[int] = self.embedding(_a ) if shape is not None: _A : Tuple = z_q.view(_a ) # reshape back to match original input shape _A : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a=False ) -> List[str]: _A : str = parameters _A , _A : Dict = torch.chunk(_a , 2 , dim=1 ) _A : List[Any] = torch.clamp(self.logvar , -30.0 , 20.0 ) _A : Optional[int] = deterministic _A : List[str] = torch.exp(0.5 * self.logvar ) _A : Dict = torch.exp(self.logvar ) if self.deterministic: _A : str = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def a__ ( self , _a = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype _A : Optional[int] = randn_tensor( self.mean.shape , generator=_a , device=self.parameters.device , dtype=self.parameters.dtype ) _A : Optional[Any] = self.mean + self.std * sample return x def a__ ( self , _a=None ) -> int: if 
self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def a__ ( self , _a , _a=[1, 2, 3] ) -> Dict: if self.deterministic: return torch.Tensor([0.0] ) _A : Dict = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_a ) def a__ ( self ) -> int: return self.mean
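# --- Added sketch (not in the original file): the core of the vector
# quantization step above, reduced to nearest-neighbor codebook lookup plus
# the straight-through gradient trick. All sizes here are illustrative.
import torch
from torch import nn

codebook = nn.Embedding(16, 4)                   # n_e = 16 codes, vq_embed_dim = 4
z = torch.randn(2, 4, 8, 8, requires_grad=True)  # (batch, channel, height, width)
z_flat = z.permute(0, 2, 3, 1).reshape(-1, 4)    # flatten to (N, vq_embed_dim)
indices = torch.cdist(z_flat, codebook.weight).argmin(dim=1)
z_q = codebook(indices).view(2, 8, 8, 4).permute(0, 3, 1, 2)
# straight-through estimator: forward pass uses z_q, gradients flow back to z
z_q = z + (z_q - z).detach()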
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
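# --- Added illustration (hypothetical inputs), assuming the renamer above
# keeps the upstream name `rename_state_dict_key` referenced inside
# `convert_parlai_checkpoint`:
for parlai_key in [
    "encoder.layers.0.attention.q_lin.weight",
    "decoder.layers.0.norm3.bias",
]:
    print(parlai_key, "->", rename_state_dict_key(parlai_key))
# encoder.layers.0.attention.q_lin.weight -> encoder.layers.0.self_attn.q_proj.weight
# decoder.layers.0.norm3.bias -> decoder.layers.0.final_layer_norm.bias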
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
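# --- Added sanity check (standalone, assumed toy data): one gradient-descent
# step with the same update rule as `logistic_reg` above nudges the weights
# toward separating a two-point problem.
import numpy as np

x_toy = np.array([[0.0, 1.0], [1.0, 0.0]])
y_toy = np.array([0, 1])
theta_toy = np.zeros(2)
h_toy = 1 / (1 + np.exp(-x_toy.dot(theta_toy)))  # sigmoid of the scores
gradient = x_toy.T.dot(h_toy - y_toy) / y_toy.size
theta_toy = theta_toy - 0.1 * gradient
print(theta_toy)  # [ 0.025 -0.025]: weight on feature 0 grows toward class 1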
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
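# --- Added usage sketch (table and file names are made up): the public
# `Dataset.to_sql` / `Dataset.from_sql` helpers drive the writer and reader
# classes defined above.
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect("demo.db")
ds.to_sql("demo_table", con)                                   # writer path
roundtrip = Dataset.from_sql("SELECT * FROM demo_table", con)  # reader path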
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
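# --- Added worked example for the functions above:
print(z_function("abacaba"))           # [0, 0, 1, 0, 3, 0, 1]
print(find_pattern("aba", "abacaba"))  # 2 occurrences, at indices 0 and 4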
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
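# --- Added sketch: instantiating the config above with one override; the
# remaining attributes fall back to the defaults in `__init__`.
config = FNetConfig(use_tpu_fourier_optimizations=False)
print(config.hidden_size, config.num_hidden_layers)  # 768 12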
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient _snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) def lowerCAmelCase_ ( snake_case_ ): _A : Any = test_results.split(""" """ ) _A : int = 0 _A : Union[str, Any] = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. _A : Union[str, Any] = expressions[-2] if """=""" in expressions[-1] else expressions[-1] for i, expression in enumerate(snake_case_ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def lowerCAmelCase_ ( snake_case_ ): _A : Optional[Any] = {} _A : Optional[int] = None _A : Union[str, Any] = False for line in failures_short_lines.split("""\n""" ): if re.search(r"""_ \[doctest\]""",snake_case_ ): _A : Any = True _A : Tuple = line.split(""" """ )[2] elif in_error and not line.split(""" """ )[0].isdigit(): _A : List[str] = line _A : Optional[int] = False return failures class lowercase : def __init__( self , _a , _a ) -> str: _A : int = title _A : List[str] = doc_test_results["""time_spent"""].split(""",""" )[0] _A : Union[str, Any] = doc_test_results["""success"""] _A : str = doc_test_results["""failures"""] _A : Optional[Any] = self.n_success + self.n_failures # Failures and success of the modeling tests _A : Optional[int] = doc_test_results @property def a__ ( self ) -> str: _A : str = [self._time_spent] _A : Dict = 0 for time in time_spent: _A : List[Any] = time.split(""":""" ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(_a ) == 1: _A : Any = [0, 0, time_parts[0]] _A , _A , _A : Tuple = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds _A , _A , _A : int = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F'''{int(_a )}h{int(_a )}m{int(_a )}s''' @property def a__ ( self ) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def a__ ( self ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def a__ ( self ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' F''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def a__ ( self ) -> Dict: _A : int = 40 _A : int = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(_a , _a )} _A : int = """""" for category, failures in category_failures.items(): if len(_a ) == 0: continue if report != "": report += "\n\n" report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(_a ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F'''The following examples had failures:\n\n\n{report}\n''', }, } @property def a__ ( self ) -> str: _A : List[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(_a ) @staticmethod def a__ ( ) -> int: _A : List[str] = [ { """type""": """section""", """text""": { """type""": """plain_text""", """text""": """There was an issue running the tests.""", }, """accessory""": { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True}, """url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } ] print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(_a )} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=_a , ) def a__ ( self ) -> Optional[int]: print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(self.payload )} ) ) _A : List[Any] = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed.""" _A : Optional[int] = client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=_a , ) def a__ ( self , _a , _a , _a , _a ) -> Union[str, Any]: _A : List[Any] = """""" for key, value in failures.items(): _A : Dict = value[:200] + """ [Truncated]""" if len(_a ) > 250 else value failures_text += F'''*{key}*\n_{value}_\n\n''' _A : int = job_name _A : int = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}} if job_link is not None: _A : List[str] = { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True}, """url""": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def a__ ( self ) -> Tuple: if self.thread_ts is None: raise ValueError("""Can only post reply if a post has been made.""" ) _A : List[str] = 
self.doc_test_results.pop("""job_link""" ) self.doc_test_results.pop("""failures""" ) self.doc_test_results.pop("""success""" ) self.doc_test_results.pop("""time_spent""" ) _A : Dict = sorted(self.doc_test_results.items() , key=lambda _a : t[0] ) for job, job_result in sorted_dict: if len(job_result["""failures"""] ): _A : Optional[int] = F'''*Num failures* :{len(job_result["failed"] )} \n''' _A : Dict = job_result["""failures"""] _A : Dict = self.get_reply_blocks(_a , _a , _a , text=_a ) print("""Sending the following reply""" ) print(json.dumps({"""blocks""": blocks} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'''Results for {job}''' , blocks=_a , thread_ts=self.thread_ts["""ts"""] , ) time.sleep(1 ) def lowerCAmelCase_ ( ): _A : Dict = os.environ["""GITHUB_RUN_ID"""] _A : Optional[Any] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100''' _A : List[Any] = requests.get(snake_case_ ).json() _A : int = {} try: jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) _A : Any = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(snake_case_ ): _A : List[Any] = requests.get(url + f'''&page={i + 2}''' ).json() jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return jobs except Exception as e: print("""Unknown error, could not fetch links.""",snake_case_ ) return {} def lowerCAmelCase_ ( snake_case_ ): _A : Any = {} if os.path.exists(snake_case_ ): _A : str = os.listdir(snake_case_ ) for file in files: try: with open(os.path.join(snake_case_,snake_case_ ),encoding="""utf-8""" ) as f: _A : Dict = f.read() except UnicodeDecodeError as e: raise ValueError(f'''Could not open {os.path.join(snake_case_,snake_case_ )}.''' ) from e return _artifact def lowerCAmelCase_ ( ): class lowercase : def __init__( self , _a ) -> Tuple: _A : List[str] = name _A : Dict = [] def __str__( self ) -> str: return self.name def a__ ( self , _a ) -> str: self.paths.append({"""name""": self.name, """path""": path} ) _A : Dict[str, Artifact] = {} _A : Union[str, Any] = filter(os.path.isdir,os.listdir() ) for directory in directories: _A : Any = directory if artifact_name not in _available_artifacts: _A : int = Artifact(snake_case_ ) _available_artifacts[artifact_name].add_path(snake_case_ ) return _available_artifacts if __name__ == "__main__": _snake_case = get_job_links() _snake_case = retrieve_available_artifacts() _snake_case = collections.OrderedDict( [ ("*.py", "API Examples"), ("*.md", "MD Examples"), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' _snake_case = { v: { "failed": [], "failures": {}, } for v in docs.values() } # Link to the GitHub Action job _snake_case = github_actions_job_links.get("run_doctests") _snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0] _snake_case = retrieve_artifact(artifact_path["name"]) if "stats" in artifact: _snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"]) _snake_case = failed _snake_case = success _snake_case = time_spent[1:-1] + ", " _snake_case = extract_first_line_failure(artifact["failures_short"]) for line in artifact["summary_short"].split("\n"): if re.search("FAILED", line): _snake_case = line.replace("FAILED ", "") _snake_case = line.split()[0].replace("\n", "") if "::" in line: _snake_case , _snake_case = line.split("::") else: 
_snake_case , _snake_case = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): _snake_case = docs[file_regex] doc_test_results[category]["failed"].append(test) _snake_case = all_failures[test] if test in all_failures else "N/A" _snake_case = failure break _snake_case = Message("🤗 Results of the doc tests.", doc_test_results) message.post() message.post_reply()
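# --- Added example (assumed input string) for the stats parser above, which
# the __main__ block calls as `handle_test_results`:
failed, success, time_spent = handle_test_results("== 2 failed, 40 passed in 1:02:13 ==")
print(failed, success, time_spent)  # 2 40 1:02:13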
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
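# --- Added follow-on sketch: evaluating the symbolic terms above numerically
# (each term is either "1" or "1/k").
terms = harmonic_series("4")  # ['1', '1/2', '1/3', '1/4']
total = sum(1 / int(t.split("/")[1]) if "/" in t else 1.0 for t in terms)
print(total)  # ~2.0833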
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
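# --- Added usage sketch: the three strategies above agree on the optimum for
# the classic CLRS price table.
example_prices = [1, 5, 8, 9]
print(naive_cut_rod_recursive(4, example_prices))  # 10 (two pieces of length 2)
print(top_down_cut_rod(4, example_prices))         # 10
print(bottom_up_cut_rod(4, example_prices))        # 10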
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
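# --- Added usage sketch (checkpoint name is an example; weights download on
# first use): `from_pretrained` resolves the concrete extractor through the
# mapping defined above.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, per the wav2vec2 entry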
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _snake_case = "2.13.1" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _snake_case = concatenate_datasets _snake_case = DownloadConfig _snake_case = DownloadManager _snake_case = DownloadMode _snake_case = DownloadConfig _snake_case = DownloadMode _snake_case = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
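# --- Added sketch of the version gating above: `packaging.version.parse`
# returns comparable Version objects, so both import guards reduce to plain
# comparisons.
from packaging import version

assert version.parse("3.8.10") >= version.parse("3.7")
assert version.parse("8.0.0").major >= 8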
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
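# --- Added usage sketch of the processor these tests exercise (sizes follow
# the test defaults; the output shape is the one the assertions check):
import numpy as np
from PIL import Image

from transformers import DonutImageProcessor

processor = DonutImageProcessor(size={"height": 18, "width": 20})
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 20])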
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
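# --- Added note: np.maximum broadcasts, so the same relu applies elementwise
# to arrays of any shape.
print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))
# [[0.  3. ]
#  [0.5 0. ]]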
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class lowercase : _a = PegasusConfig _a = {} _a = "gelu" def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=40 , _a=2 , _a=1 , _a=0 , ) -> List[str]: _A : List[Any] = parent _A : Optional[Any] = batch_size _A : Tuple = seq_length _A : int = is_training _A : List[Any] = use_labels _A : int = vocab_size _A : Dict = hidden_size _A : Dict = num_hidden_layers _A : int = num_attention_heads _A : int = intermediate_size _A : Dict = hidden_dropout_prob _A : Dict = attention_probs_dropout_prob _A : Union[str, Any] = max_position_embeddings _A : int = eos_token_id _A : Dict = pad_token_id _A : List[str] = bos_token_id def a__ ( self ) -> str: _A : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A : Any = tf.concat([input_ids, eos_tensor] , axis=1 ) _A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A : Optional[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A : List[Any] = prepare_pegasus_inputs_dict(_a , _a , _a ) return config, inputs_dict def a__ ( self , _a , _a ) -> Optional[Any]: _A : Any = TFPegasusModel(config=_a ).get_decoder() _A : Union[str, Any] = inputs_dict["""input_ids"""] _A : Union[str, Any] = input_ids[:1, :] _A : List[str] = inputs_dict["""attention_mask"""][:1, :] _A : Any = inputs_dict["""head_mask"""] _A : Any = 1 # first forward pass _A : int = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a ) _A , _A : Union[str, Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A : Dict = tf.concat([input_ids, next_tokens] , axis=-1 ) _A : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A : Any = model(_a , attention_mask=_a )[0] _A : List[Any] = model(_a , attention_mask=_a , past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _A : int = 
output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a , _a , rtol=1e-3 ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=None,snake_case_=None,snake_case_=None,snake_case_=None,snake_case_=None,): if attention_mask is None: _A : List[str] = tf.cast(tf.math.not_equal(snake_case_,config.pad_token_id ),tf.inta ) if decoder_attention_mask is None: _A : int = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:],config.pad_token_id ),tf.inta ), ],axis=-1,) if head_mask is None: _A : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _a = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _a = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _a = True _a = False _a = False def a__ ( self ) -> Optional[Any]: _A : Tuple = TFPegasusModelTester(self ) _A : List[Any] = ConfigTester(self , config_class=_a ) def a__ ( self ) -> Optional[int]: self.config_tester.run_common_tests() def a__ ( self ) -> Any: _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) @require_sentencepiece @require_tokenizers @require_tf class lowercase ( unittest.TestCase ): _a = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] _a = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers _a = "google/pegasus-xsum" @cached_property def a__ ( self ) -> List[Any]: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def a__ ( self ) -> List[Any]: _A : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def a__ ( self , **_a ) -> Tuple: _A : Dict = self.translate_src_text(**_a ) assert self.expected_text == generated_words def a__ ( self , **_a ) -> Any: _A : str = self.tokenizer(self.src_text , **_a , padding=_a , return_tensors="""tf""" ) _A : Tuple = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_a , ) _A : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a ) return generated_words @slow def a__ ( self ) -> Optional[int]: self._assert_generated_batch_equal_expected()
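# --- Added usage sketch of the path the integration test above exercises
# (tokenize -> generate -> decode); the checkpoint name comes from the test
# and weights are fetched on first use.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = tokenizer(["PG&E scheduled blackouts amid high winds."], return_tensors="tf", padding=True)
summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))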
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
_A : int = list(json_save_dir.glob("""rank_*.json""" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. _A : Any = {} if args.src_lang is not None: _A : int = args.src_lang if args.tgt_lang is not None: _A : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case_ ) _A , _A : str = eval_data_dir( args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,) if args.local_rank <= 0: _A : List[Any] = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case_ ) _A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout ) _A : Optional[int] = combine_partial_results(snake_case_ ) if args.num_return_sequences > 1: _A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(snake_case_,snake_case_ ) return _A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" ) with open(snake_case_ ) as f: _A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )] # Calculate metrics, save metrics, and save _generations.txt _A : Dict = """translation""" in args.task _A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge _A : Tuple = """bleu""" if calc_bleu else """rouge""" _A : Dict = score_fn(snake_case_,snake_case_ ) _A : List[Any] = len(snake_case_ ) _A : Optional[int] = time.time() - start_time _A : Dict = round(runtime / metrics["""n_obs"""],4 ) _A : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics _A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(snake_case_,snake_case_,indent=snake_case_ ) print(snake_case_ ) write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_ ): _A : Dict = [] for partial_result in partial_results: records.extend(snake_case_ ) _A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] ) _A : List[str] = [x["""pred"""] for x in records] return preds def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): # WAIT FOR lots of .json files _A : Optional[Any] = time.time() logger.info("""waiting for all nodes to finish""" ) _A : List[str] = None while (time.time() - start_wait) < timeout: _A : str = list(save_dir.glob("""rank_*.json""" ) ) if len(snake_case_ ) < num_replicas: continue try: # make sure all json files are fully saved _A : List[str] = lmap(snake_case_,snake_case_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("""Rank 0 gave up on waiting for other processes""" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
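# --- Added sketch of the result-combining step above: per-rank JSON shards
# are flattened, ordered by example id, and reduced to prediction strings.
partial_results = [
    [{"pred": "b", "id": 1}],  # rank 0 shard
    [{"pred": "a", "id": 0}],  # rank 1 shard
]
records = [r for shard in partial_results for r in shard]
preds = [r["pred"] for r in sorted(records, key=lambda r: r["id"])]
print(preds)  # ['a', 'b']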
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = ConsistencyModelPipeline _a = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _a = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _a = frozenset( [ "num_inference_steps", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) @property def a__ ( self ) -> Dict: _A : List[str] = UNetaDModel.from_pretrained( """diffusers/consistency-models-test""" , subfolder="""test_unet""" , ) return unet @property def a__ ( self ) -> List[str]: _A : Optional[Any] = UNetaDModel.from_pretrained( """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , ) return unet def a__ ( self , _a=False ) -> List[Any]: if class_cond: _A : List[str] = self.dummy_cond_unet else: _A : List[str] = self.dummy_uncond_unet # Default to CM multistep sampler _A : int = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _A : List[Any] = { """unet""": unet, """scheduler""": scheduler, } return components def a__ ( self , _a , _a=0 ) -> Any: if str(_a ).startswith("""mps""" ): _A : int = torch.manual_seed(_a ) else: _A : Union[str, Any] = torch.Generator(device=_a ).manual_seed(_a ) _A : List[str] = { """batch_size""": 1, """num_inference_steps""": None, """timesteps""": [22, 0], """generator""": generator, """output_type""": """np""", } return inputs def a__ ( self ) -> Any: _A : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : Union[str, Any] = self.get_dummy_components() _A : Any = ConsistencyModelPipeline(**_a ) _A : Any = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _A : Optional[int] = self.get_dummy_inputs(_a ) _A : Any = pipe(**_a ).images assert image.shape == (1, 32, 32, 3) _A : Union[str, Any] = image[0, -3:, -3:, -1] _A : Dict = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def a__ ( self ) -> Dict: _A : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : Any = self.get_dummy_components(class_cond=_a ) _A : str = ConsistencyModelPipeline(**_a ) _A : List[str] = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _A : List[str] = self.get_dummy_inputs(_a ) _A : Tuple = 0 _A : int = pipe(**_a ).images assert image.shape == (1, 32, 32, 3) _A : Union[str, Any] = image[0, -3:, -3:, -1] _A : List[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def a__ ( self ) -> Any: _A : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : Tuple = self.get_dummy_components() _A : Dict = ConsistencyModelPipeline(**_a ) _A : Optional[Any] = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _A : Union[str, Any] = self.get_dummy_inputs(_a ) _A : 
Optional[Any] = 1 _A : Optional[Any] = None _A : Dict = pipe(**_a ).images assert image.shape == (1, 32, 32, 3) _A : Optional[int] = image[0, -3:, -3:, -1] _A : Optional[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def a__ ( self ) -> List[str]: _A : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : Any = self.get_dummy_components(class_cond=_a ) _A : str = ConsistencyModelPipeline(**_a ) _A : List[str] = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _A : Optional[int] = self.get_dummy_inputs(_a ) _A : Optional[int] = 1 _A : Any = None _A : str = 0 _A : Dict = pipe(**_a ).images assert image.shape == (1, 32, 32, 3) _A : str = image[0, -3:, -3:, -1] _A : str = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class lowercase ( unittest.TestCase ): def a__ ( self ) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self , _a=0 , _a=False , _a="cpu" , _a=torch.floataa , _a=(1, 3, 64, 64) ) -> Optional[int]: _A : Optional[int] = torch.manual_seed(_a ) _A : List[Any] = { """num_inference_steps""": None, """timesteps""": [22, 0], """class_labels""": 0, """generator""": generator, """output_type""": """np""", } if get_fixed_latents: _A : Tuple = self.get_fixed_latents(seed=_a , device=_a , dtype=_a , shape=_a ) _A : Any = latents return inputs def a__ ( self , _a=0 , _a="cpu" , _a=torch.floataa , _a=(1, 3, 64, 64) ) -> Optional[Any]: if type(_a ) == str: _A : int = torch.device(_a ) _A : Optional[int] = torch.Generator(device=_a ).manual_seed(_a ) _A : Tuple = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) return latents def a__ ( self ) -> Any: _A : List[str] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) _A : Optional[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _A : Dict = ConsistencyModelPipeline(unet=_a , scheduler=_a ) pipe.to(torch_device=_a ) pipe.set_progress_bar_config(disable=_a ) _A : List[Any] = self.get_inputs() _A : Optional[Any] = pipe(**_a ).images assert image.shape == (1, 64, 64, 3) _A : int = image[0, -3:, -3:, -1] _A : Dict = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def a__ ( self ) -> Optional[int]: _A : Any = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) _A : Tuple = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _A : Dict = ConsistencyModelPipeline(unet=_a , scheduler=_a ) pipe.to(torch_device=_a ) pipe.set_progress_bar_config(disable=_a ) _A : Union[str, Any] = self.get_inputs() _A : Optional[Any] = 1 _A : str = None _A : int = pipe(**_a ).images assert image.shape == (1, 64, 64, 3) _A : Optional[int] = image[0, -3:, -3:, -1] _A : Tuple = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def a__ ( self ) -> Optional[Any]: _A : Any = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) _A : Optional[int] = CMStochasticIterativeScheduler( 
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _A : Dict = ConsistencyModelPipeline(unet=_a , scheduler=_a ) pipe.to(torch_device=_a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=_a ) _A : int = self.get_inputs(get_fixed_latents=_a , device=_a ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_a , enable_math=_a , enable_mem_efficient=_a ): _A : Union[str, Any] = pipe(**_a ).images assert image.shape == (1, 64, 64, 3) _A : Optional[Any] = image[0, -3:, -3:, -1] _A : Dict = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def a__ ( self ) -> Optional[int]: _A : int = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) _A : Any = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _A : str = ConsistencyModelPipeline(unet=_a , scheduler=_a ) pipe.to(torch_device=_a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=_a ) _A : Any = self.get_inputs(get_fixed_latents=_a , device=_a ) _A : Dict = 1 _A : Union[str, Any] = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_a , enable_math=_a , enable_mem_efficient=_a ): _A : int = pipe(**_a ).images assert image.shape == (1, 64, 64, 3) _A : Optional[int] = image[0, -3:, -3:, -1] _A : Dict = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
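# Standalone sketch of the multistep sampling path exercised by the slow tests above
# (same public checkpoint and the two-step [22, 0] schedule; the device is an assumption):
#
# unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
# scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
# pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
# image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0, output_type="np").images[0]
# # image has shape (64, 64, 3), matching the assertions in the tests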
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): @slow def a__ ( self ) -> Any: _A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) _A : List[Any] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _A : List[str] = model(_a )["""last_hidden_state"""] _A : Union[str, Any] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. _A : List[Any] = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
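# Eager equivalent of the integration test above; the input ids are the
# pre-tokenized "J'aime le camembert !" sequence the test hard-codes:
#
# model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
# output = model(tf.convert_to_tensor([[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]]))["last_hidden_state"]
# # output.shape == (1, 10, 768)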
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _snake_case = datasets.utils.logging.get_logger(__name__) @dataclass class lowercase ( datasets.BuilderConfig ): _a = None _a = "utf-8" _a = None _a = None _a = True # deprecated _a = None # deprecated _a = 1_0 << 2_0 # 10MB _a = None class lowercase ( datasets.ArrowBasedBuilder ): _a = JsonConfig def a__ ( self ) -> Union[str, Any]: if self.config.block_size is not None: logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" ) _A : Optional[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" ) if self.config.newlines_in_values is not None: raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" ) return datasets.DatasetInfo(features=self.config.features ) def a__ ( self , _a ) -> Union[str, Any]: if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) _A : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_a , (str, list, tuple) ): _A : List[str] = data_files if isinstance(_a , _a ): _A : Optional[int] = [files] _A : str = [dl_manager.iter_files(_a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] _A : Tuple = [] for split_name, files in data_files.items(): if isinstance(_a , _a ): _A : Optional[int] = [files] _A : Tuple = [dl_manager.iter_files(_a ) for file in files] splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={"""files""": files} ) ) return splits def a__ ( self , _a ) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): _A : List[Any] = self.config.features.arrow_schema.field(_a ).type _A : str = pa_table.append_column(_a , pa.array([None] * len(_a ) , type=_a ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example _A : Optional[Any] = table_cast(_a , self.config.features.arrow_schema ) return pa_table def a__ ( self , _a ) -> Optional[int]: for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: _A : Any = json.load(_a ) # We keep only the field we are interested in _A : Optional[int] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_a , (list, tuple) ): _A : Union[str, Any] = set().union(*[row.keys() for row in dataset] ) _A : Union[str, Any] = {col: [row.get(_a ) for row in dataset] for col in keys} else: _A : List[str] = dataset _A : Optional[Any] = pa.Table.from_pydict(_a ) yield file_idx, self._cast_table(_a ) # If the file has one json object per line else: with open(_a , """rb""" ) as f: _A : Dict = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really 
small _A : List[str] = max(self.config.chunksize // 32 , 16 << 10 ) _A : Any = ( self.config.encoding_errors if self.config.encoding_errors is not None else """strict""" ) while True: _A : List[Any] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_a ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": _A : List[str] = batch.decode(self.config.encoding , errors=_a ).encode("""utf-8""" ) try: while True: try: _A : Union[str, Any] = paj.read_json( io.BytesIO(_a ) , read_options=paj.ReadOptions(block_size=_a ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_a , pa.ArrowInvalid ) and "straddling" not in str(_a ) or block_size > len(_a ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'''Batch of {len(_a )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: _A : List[str] = json.load(_a ) except json.JSONDecodeError: logger.error(F'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_a , _a ): # list is the only sequence type supported in JSON try: _A : str = set().union(*[row.keys() for row in dataset] ) _A : Dict = {col: [row.get(_a ) for row in dataset] for col in keys} _A : List[Any] = pa.Table.from_pydict(_a ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None yield file_idx, self._cast_table(_a ) break else: logger.error(F'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise ValueError( F'''Not able to read records in the JSON file at {file}. ''' F'''You should probably indicate the field of the JSON file containing your records. ''' F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ''' F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_a ) batch_idx += 1
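# Sketch of driving this builder through the public API (the file path and field
# name are placeholders):
#
# import datasets
# ds = datasets.load_dataset("json", data_files="data.json", field="records", split="train")
#
# With `field` set, the loader json.load()s the whole file and keeps only that key;
# without it, the file is streamed in `chunksize`-byte batches and parsed as JSON
# Lines with pyarrow.json, doubling block_size when a record straddles a block boundary.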
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowercase : def __init__( self , _a , _a=2 , _a=3 , _a=4 , _a=2 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=36 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=6 , _a=6 , _a=3 , _a=4 , _a=None , _a=1000 , ) -> int: _A : Tuple = parent _A : int = batch_size _A : Union[str, Any] = num_channels _A : int = image_size _A : Tuple = patch_size _A : Union[str, Any] = is_training _A : List[Any] = use_input_mask _A : Tuple = use_token_type_ids _A : Optional[int] = use_labels _A : Any = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = intermediate_size _A : Optional[Any] = hidden_act _A : Optional[Any] = hidden_dropout_prob _A : Dict = attention_probs_dropout_prob _A : Any = max_position_embeddings _A : int = type_vocab_size _A : Tuple = type_sequence_label_size _A : Optional[int] = initializer_range _A : List[Any] = coordinate_size _A : Dict = shape_size _A : Tuple = num_labels _A : str = num_choices _A : str = scope _A : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _A : Union[str, Any] = text_seq_length _A : Optional[Any] = (image_size // patch_size) ** 2 + 1 _A : List[str] = self.text_seq_length + self.image_seq_length def a__ ( self ) -> str: _A : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _A : Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) _A : Dict = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _A : Union[str, Any] = bbox[i, j, 3] _A : Optional[int] = bbox[i, j, 1] _A : Dict = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: _A : Union[str, Any] = bbox[i, j, 2] _A : Dict = bbox[i, j, 0] _A : Union[str, Any] = tmp_coordinate _A : Any = tf.constant(_a ) _A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A : List[Any] = None if self.use_input_mask: _A : List[str] = random_attention_mask([self.batch_size, self.text_seq_length] ) _A : List[Any] = None if self.use_token_type_ids: _A : str = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _A : Union[str, Any] = None _A : Union[str, Any] = None if self.use_labels: _A : int = ids_tensor([self.batch_size] , 
self.type_sequence_label_size ) _A : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _A : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def a__ ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _A : str = TFLayoutLMvaModel(config=_a ) # text + image _A : List[str] = model(_a , pixel_values=_a , training=_a ) _A : Union[str, Any] = model( _a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , training=_a , ) _A : int = model(_a , bbox=_a , pixel_values=_a , training=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _A : Dict = model(_a , training=_a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _A : Union[str, Any] = model({"""pixel_values""": pixel_values} , training=_a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> int: _A : str = self.num_labels _A : List[str] = TFLayoutLMvaForSequenceClassification(config=_a ) _A : int = model( _a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , labels=_a , training=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any: _A : str = self.num_labels _A : Any = TFLayoutLMvaForTokenClassification(config=_a ) _A : Union[str, Any] = model( _a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , labels=_a , training=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _A : int = 2 _A : Dict = TFLayoutLMvaForQuestionAnswering(config=_a ) _A : Tuple = model( _a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , training=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self ) -> Union[str, Any]: _A : int = self.prepare_config_and_inputs() ((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) : Any = config_and_inputs _A : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _a = ( 
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) _a = False _a = False _a = False def a__ ( self , _a , _a , _a , _a , _a ) -> Optional[int]: return True def a__ ( self , _a , _a , _a=False ) -> dict: _A : int = copy.deepcopy(_a ) if model_class in get_values(_a ): _A : List[Any] = { k: tf.tile(tf.expand_dims(_a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_a , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_a ): _A : List[str] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_a ): _A : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) _A : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_a ): _A : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_a ): _A : Tuple = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def a__ ( self ) -> List[str]: _A : List[str] = TFLayoutLMvaModelTester(self ) _A : Optional[Any] = ConfigTester(self , config_class=_a , hidden_size=37 ) def a__ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self ) -> List[str]: _A , _A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Dict = model_class(_a ) if getattr(_a , """hf_compute_loss""" , _a ): # The number of elements in the loss should be the same as the number of elements in the label _A : int = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a ) _A : int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_a )[0] ] _A : int = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs _A : List[str] = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a ) _A : Union[str, Any] = prepared_for_class.pop("""input_ids""" ) _A : Tuple = model(_a , **_a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions _A : Tuple = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a ) _A : Any = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: _A : int = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: _A : Tuple = -100 _A : Dict = tf.convert_to_tensor(_a ) _A : Any = model(_a , **_a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict _A : List[str] = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a ) _A : int = model(_a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple _A : List[str] = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a ) # Get keys that were added with the _prepare_for_class function _A : str = prepared_for_class.keys() - inputs_dict.keys() _A : Dict = inspect.signature(model.call ).parameters _A : Dict = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple _A : Dict = {0: """input_ids"""} for label_key 
in label_keys: _A : str = signature_names.index(_a ) _A : int = label_key _A : Dict = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple _A : List[str] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: _A : Tuple = prepared_for_class[value] _A : int = tuple(_a ) # Send to model _A : List[str] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def a__ ( self ) -> Optional[int]: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_a , _a , _a , _a , _a , _a ) def a__ ( self ) -> Any: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _A : Optional[Any] = type self.model_tester.create_and_check_model(_a , _a , _a , _a , _a , _a ) def a__ ( self ) -> Tuple: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _a , _a , _a , _a , _a , _a , _a ) def a__ ( self ) -> Tuple: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _a , _a , _a , _a , _a , _a , _a ) def a__ ( self ) -> List[str]: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _a , _a , _a , _a , _a , _a , _a ) @slow def a__ ( self ) -> str: for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A : Tuple = TFLayoutLMvaModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def lowerCAmelCase_ ( ): _A : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class lowercase ( unittest.TestCase ): @cached_property def a__ ( self ) -> Tuple: return LayoutLMvaImageProcessor(apply_ocr=_a ) if is_vision_available() else None @slow def a__ ( self ) -> Union[str, Any]: _A : Optional[int] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) _A : Tuple = self.default_image_processor _A : int = prepare_img() _A : List[Any] = image_processor(images=_a , return_tensors="""tf""" ).pixel_values _A : str = tf.constant([[1, 2]] ) _A : int = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass _A : Tuple = model(input_ids=_a , bbox=_a , pixel_values=_a , training=_a ) # verify the logits _A : int = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , _a ) _A : List[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _a , atol=1e-4 ) )
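# Minimal inference sketch mirroring the integration test above (the toy
# input_ids/bbox values are the same ones the test uses, and prepare_img()
# loads the local COCO fixture defined in this file):
#
# model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
# processor = LayoutLMvaImageProcessor(apply_ocr=False)
# pixel_values = processor(images=prepare_img(), return_tensors="tf").pixel_values
# outputs = model(
#     input_ids=tf.constant([[1, 2]]),
#     bbox=tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0),
#     pixel_values=pixel_values,
# )
# # outputs.last_hidden_state.shape == (1, 199, 768)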
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _snake_case = "▁" _snake_case = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = BertGenerationTokenizer _a = False _a = True def a__ ( self ) -> str: super().setUp() _A : Tuple = BertGenerationTokenizer(_a , keep_accents=_a ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self ) -> Union[str, Any]: _A : List[Any] = """<s>""" _A : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def a__ ( self ) -> List[Any]: _A : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(_a ) , 1002 ) def a__ ( self ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def a__ ( self ) -> Union[str, Any]: _A : str = BertGenerationTokenizer(_a , keep_accents=_a ) _A : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , ) _A : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _A : List[str] = tokenizer.convert_tokens_to_ids(_a ) self.assertListEqual( _a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _A : Union[str, Any] = tokenizer.convert_ids_to_tokens(_a ) self.assertListEqual( _a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def a__ ( self ) -> Tuple: return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def a__ ( self ) -> List[Any]: _A : List[Any] = """Hello World!""" _A : List[str] = [1_8536, 2260, 101] self.assertListEqual(_a , self.big_tokenizer.encode(_a ) ) @slow def a__ ( self ) -> List[str]: _A : str = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) _A : List[Any] = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, ] self.assertListEqual(_a , self.big_tokenizer.encode(_a ) ) @require_torch @slow def a__ ( self ) -> Optional[Any]: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence _A : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] _A : str = """ """.join(_a ) _A : int = self.big_tokenizer.encode_plus(_a , return_tensors="""pt""" , return_token_type_ids=_a ) _A : Any = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_a ) _A : Optional[int] = BertGenerationConfig() _A : int = BertGenerationEncoder(_a ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_a ) model(**_a ) @slow def a__ ( self ) -> Optional[Any]: # fmt: off _A : Any = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_a , 
model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
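Outside the test harness, the same denoising loop these assertions exercise looks roughly like the sketch below. It assumes `torchsde` is installed (the scheduler requires it) and uses a zero tensor as a stand-in for a trained UNet's noise prediction; it is illustrative, not a shipped pipeline.

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

# Start from scaled Gaussian noise, then step through the schedule.
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a trained UNet's prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample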
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
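The point of the `_import_structure` indirection restored above is that the heavy backends (torch, TensorFlow, Flax) are imported only on first attribute access. A stripped-down illustration of the same lazy-import idea follows; `LazyModule` here is a hypothetical minimal class for exposition, not the `_LazyModule` transformers actually ships.

import importlib
import types


class LazyModule(types.ModuleType):
    # Hypothetical minimal illustration; the real _LazyModule does much more
    # (backend checks, __dir__, pickling support, ...).
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import the submodule that defines `attr` only when it is first requested.
        module_name = self._class_to_module[attr]
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, attr)

# e.g. LazyModule("pkg", {"configuration_roformer": ["RoFormerConfig"]}) imports
# pkg.configuration_roformer only when .RoFormerConfig is first accessed.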
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
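A minimal sampling sketch using the scheduler above; `dummy_score` is a stand-in for a trained score network, and the loop assumes it runs somewhere the class and its diffusers dependencies are importable.

import torch

def dummy_score(x, t):
    # Stand-in for a trained score network; returns a tensor shaped like x.
    return -x

scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
scheduler.set_timesteps(num_inference_steps=100)
x = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    x, x_mean = scheduler.step_pred(dummy_score(x, t), x, t)
# x_mean holds the noise-free estimate after the final step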
def matching_min_vertex_cover(graph: dict) -> set:
    """
    APX-algorithm for minimum vertex cover: repeatedly pick an arbitrary
    edge and add both of its endpoints to the cover.
    """
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both endpoints to chosen_vertices and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represent the graph's edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
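A quick worked run on the graph from the commented-out example; because both endpoints of every popped edge enter the cover, the result is guaranteed to cover all edges (and is at most twice the optimum), though the exact set depends on set-pop order.

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
# Every edge has at least one endpoint in the cover:
assert all(u in cover or v in cover for u, v in get_edges(graph))
print(cover)  # e.g. {0, 1, 2, 4}; the exact set may vary between runs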
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import itertools
import math


def is_prime(number: int) -> bool:
    """Determine whether a given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"""{solution() = }""")
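A few sanity checks (the 100th prime, 541, is a standard reference value):

assert solution(1) == 2
assert solution(6) == 13
assert solution(100) == 541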
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid constants, distances in metres
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Great-circle distance between two points on Earth via the haversine
    formula, using reduced (geodetic) latitudes to account for flattening.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
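A hedged worked example with approximate city-centre coordinates; central London to central Paris should come out at roughly 344 km:

london = (51.5074, -0.1278)
paris = (48.8566, 2.3522)
metres = haversine_distance(london[0], london[1], paris[0], paris[1])
print(f"{metres / 1000:.1f} km")  # roughly 344 km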
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = IFImgaImgSuperResolutionPipeline _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} ) _a = PipelineTesterMixin.required_optional_params - {"latents"} def a__ ( self ) -> List[Any]: return self._get_superresolution_dummy_components() def a__ ( self , _a , _a=0 ) -> Any: if str(_a ).startswith("""mps""" ): _A : Tuple = torch.manual_seed(_a ) else: _A : Tuple = torch.Generator(device=_a ).manual_seed(_a ) _A : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a ) _A : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a ) _A : Optional[int] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def a__ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def a__ ( self ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def a__ ( self ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def a__ ( self ) -> Union[str, Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def a__ ( self ) -> List[Any]: self._test_save_load_local() def a__ ( self ) -> str: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
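The early-exit decision above keys off the entropy of each highway classifier's logits. The `entropy` helper is imported from `modeling_highway_bert`, which is not shown here; the sketch below restates the usual DeeBERT definition and is an assumption about that helper, not its verbatim source.

import torch

def logits_entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution over classes, per example;
    # low entropy means the highway classifier is confident enough to exit early.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)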
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
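A minimal usage sketch, assuming network access to the real openai/whisper-tiny checkpoint; the waveform is a silent one-second stand-in for actual audio.

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16000, dtype=np.float32)  # stand-in for one second of 16 kHz audio
inputs = processor(audio=waveform, sampling_rate=16000, text="hello world", return_tensors="np")
print(inputs["input_features"].shape)  # log-Mel features for the encoder: (batch, mel_bins, frames)
print(inputs["labels"])                # token ids used as decoder targets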
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
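A brief instantiation sketch (values are illustrative, not a recommended configuration):

from transformers import XmodConfig

config = XmodConfig(
    num_hidden_layers=6,
    adapter_reduction_factor=4,
    languages=("en_XX", "de_DE"),
    default_language="en_XX",
)
print(config.adapter_reduction_factor)  # 4
print(config.languages)                 # ['en_XX', 'de_DE']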
import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def lowerCAmelCase_ ( ): _A : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--model_ckpt""",type=snake_case_,default="""microsoft/unixcoder-base-nine""" ) parser.add_argument("""--num_epochs""",type=snake_case_,default=5 ) parser.add_argument("""--batch_size""",type=snake_case_,default=6 ) parser.add_argument("""--gradient_accumulation_steps""",type=snake_case_,default=1 ) parser.add_argument("""--freeze""",type=snake_case_,default=snake_case_ ) parser.add_argument("""--learning_rate""",type=snake_case_,default=5e-4 ) parser.add_argument("""--seed""",type=snake_case_,default=0 ) parser.add_argument("""--lr_scheduler_type""",type=snake_case_,default="""cosine""" ) parser.add_argument("""--num_warmup_steps""",type=snake_case_,default=10 ) parser.add_argument("""--weight_decay""",type=snake_case_,default=0.01 ) parser.add_argument("""--output_dir""",type=snake_case_,default="""./results""" ) return parser.parse_args() _snake_case = load("accuracy") def lowerCAmelCase_ ( snake_case_ ): _A , _A : str = eval_pred _A : Optional[Any] = np.argmax(snake_case_,axis=1 ) return metric.compute(predictions=snake_case_,references=snake_case_ ) class lowercase ( UpperCamelCase__ ): def __init__( self , _a ) -> None: super().__init__() _A : Dict = trainer def a__ ( self , _a , _a , _a , **_a ) -> str: if control.should_evaluate: _A : Tuple = deepcopy(_a ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" ) return control_copy def lowerCAmelCase_ ( ): _A : List[Any] = get_args() set_seed(args.seed ) _A : Optional[int] = load_dataset("""codeparrot/codecomplex""",split="""train""" ) _A : Optional[int] = dataset.train_test_split(test_size=0.2 ) _A : str = train_test["""test"""].train_test_split(test_size=0.5 ) _A : Optional[int] = DatasetDict( { """train""": train_test["""train"""], """test""": test_validation["""train"""], """valid""": test_validation["""test"""], } ) print("""Loading tokenizer and model""" ) _A : List[Any] = AutoTokenizer.from_pretrained(args.model_ckpt ) _A : List[str] = tokenizer.eos_token _A : int = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt,num_labels=7 ) _A : Union[str, Any] = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _A : int = False _A : int = ClassLabel(num_classes=7,names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) ) def tokenize(snake_case_ ): _A : List[str] = tokenizer(example["""src"""],truncation=snake_case_,max_length=1024 ) _A : List[Any] = labels.straint(example["""complexity"""] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _A : Optional[Any] = train_test_validation.map( snake_case_,batched=snake_case_,remove_columns=train_test_validation["""train"""].column_names,) _A : List[str] = DataCollatorWithPadding(tokenizer=snake_case_ ) _A : int = TrainingArguments( 
output_dir=args.output_dir,learning_rate=args.learning_rate,lr_scheduler_type=args.lr_scheduler_type,evaluation_strategy="""epoch""",save_strategy="""epoch""",logging_strategy="""epoch""",per_device_train_batch_size=args.batch_size,per_device_eval_batch_size=args.batch_size,num_train_epochs=args.num_epochs,gradient_accumulation_steps=args.gradient_accumulation_steps,weight_decay=0.01,metric_for_best_model="""accuracy""",run_name="""complexity-java""",report_to="""wandb""",) _A : Dict = Trainer( model=snake_case_,args=snake_case_,train_dataset=tokenized_datasets["""train"""],eval_dataset=tokenized_datasets["""valid"""],tokenizer=snake_case_,data_collator=snake_case_,compute_metrics=snake_case_,) print("""Training...""" ) trainer.add_callback(CustomCallback(snake_case_ ) ) trainer.train() if __name__ == "__main__": main()
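One detail worth calling out: when --freeze is passed, the loop over model.roberta.parameters() is meant to stop gradient flow through the backbone so only the classification head trains. The mangled assignment above obscures it; the intended idiomatic form (an assumption about the original script) is:

for param in model.roberta.parameters():
    param.requires_grad = False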
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: exponential time, no memoization."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized recursion (top-down dynamic programming)."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
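Beyond the built-in demo, the classic CLRS price table is a handy cross-check: for a rod of length 8 the optimum is 22, from cutting into lengths 2 and 6 (5 + 17):

prices = [1, 5, 8, 9, 10, 17, 17, 20]
assert naive_cut_rod_recursive(8, prices) == 22
assert top_down_cut_rod(8, prices) == 22
assert bottom_up_cut_rod(8, prices) == 22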
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = RobertaTokenizer _a = RobertaTokenizerFast _a = True _a = {"cls_token": "<s>"} def a__ ( self ) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _A : Any = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _A : List[str] = dict(zip(_a , range(len(_a ) ) ) ) _A : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _A : Optional[int] = {"""unk_token""": """<unk>"""} _A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) _A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_a ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_a ) ) def a__ ( self , **_a ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a ) def a__ ( self , **_a ) -> str: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def a__ ( self , _a ) -> Dict: _A : Optional[int] = """lower newer""" _A : int = """lower newer""" return input_text, output_text def a__ ( self ) -> List[str]: _A : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A : List[str] = """lower newer""" _A : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] _A : str = tokenizer.tokenize(_a ) # , add_prefix_space=True) self.assertListEqual(_a , _a ) _A : Optional[Any] = tokens + [tokenizer.unk_token] _A : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def a__ ( self ) -> List[Any]: _A : Optional[int] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=_a ) , [0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=_a ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , ) @slow def a__ ( self ) -> str: _A : Union[str, Any] = self.tokenizer_class.from_pretrained("""roberta-base""" ) _A : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=_a ) _A : Union[str, Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_a ) _A : Optional[Any] = tokenizer.encode( """sequence builders""" , add_special_tokens=_a , add_prefix_space=_a ) _A : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=_a , add_prefix_space=_a ) _A : List[str] = tokenizer.build_inputs_with_special_tokens(_a ) _A : Dict = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def a__ ( self ) -> List[Any]: _A : Optional[int] = self.get_tokenizer() _A : List[Any] = """Encode this sequence.""" _A : Any = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments _A : Dict = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) _A : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_a , _a ) _A : int = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) _A : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_a , _a ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) _A : List[Any] = tokenizer.encode(_a , add_special_tokens=_a ) _A : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_a , _a ) # Testing spaces after special tokens _A : Dict = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(_a , lstrip=_a , rstrip=_a )} ) # mask token has a left space _A : Any = tokenizer.convert_tokens_to_ids(_a ) _A : Union[str, Any] = """Encode <mask> sequence""" _A : Optional[Any] = """Encode <mask>sequence""" _A : List[Any] = tokenizer.encode(_a ) _A : int = encoded.index(_a ) _A : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_a , _a ) _A : Optional[Any] = tokenizer.encode(_a ) _A : Optional[Any] = encoded.index(_a ) _A : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_a , _a ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A : Any = self.rust_tokenizer_class.from_pretrained(_a , **_a ) _A : int = self.tokenizer_class.from_pretrained(_a , **_a ) _A : Union[str, Any] = """A, <mask> AllenNLP sentence.""" _A : List[Any] = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) _A : Optional[int] = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) _A : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _A : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt 
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( _a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( _a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def a__ ( self ) -> Any: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): _A : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _A : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , _a ) self.assertEqual(post_processor_state["""add_prefix_space"""] , _a ) self.assertEqual(post_processor_state["""trim_offsets"""] , _a ) def a__ ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` _A : Dict = F'''{text_of_1_token} {text_of_1_token}''' _A : Tuple = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) _A : List[str] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) _A : Dict = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : List[str] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) _A : int = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : List[Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) _A : Dict = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _A : Optional[Any] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , 
add_prefix_space=_a , trim_offsets=_a ) _A : Union[str, Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , ) _A : Optional[int] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : int = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , ) _A : Optional[int] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _A : Optional[Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
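Screen-scraping like this is brittle; Yahoo's "My(6px) ..." class can change without notice and requests may be throttled or blocked. A defensive variant might look like the following sketch (stock_price_safe is a hypothetical addition, not part of the original):

from typing import Optional

def stock_price_safe(symbol: str = "AAPL") -> Optional[str]:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    node = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    if node is None or node.find("span") is None:
        return None  # markup changed, symbol unknown, or request blocked
    return node.find("span").text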
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase ( unittest.TestCase ): _a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def a__ ( self , _a , _a , _a ) -> int: _A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a ) return generator, ["Something to write", "Something else"] def a__ ( self , _a , _a ) -> Dict: _A : Any = generator("""Something there""" ) self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) _A : Optional[int] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a ) self.assertEqual( _a , [ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] , ) with self.assertRaises(_a ): generator(4 ) @require_torch def a__ ( self ) -> List[str]: _A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _A : Dict = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] ) _A : Any = 3 _A : Any = generator( """Something there""" , num_return_sequences=_a , num_beams=_a , ) _A : Optional[int] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a , _a ) _A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a ) self.assertEqual( _a , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _A : Dict = generator.model.config.eos_token_id _A : List[str] = """<pad>""" _A : Dict = generator( ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , ) self.assertEqual( _a , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def a__ ( self ) -> int: _A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _A : str = generator("""Something there""" , do_sample=_a ) self.assertEqual(_a , [{"""generated_text""": """"""}] )
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowercase ( unittest.TestCase ): def a__ ( self ) -> str: _A : Any = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() _A : Tuple = dict(zip(_a , range(len(_a ) ) ) ) _A : List[str] = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } _A : Optional[Any] = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 1_6000, """return_attention_mask""": False, """do_normalize""": True, } _A : Union[str, Any] = tempfile.mkdtemp() _A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) _A : Optional[Any] = os.path.join(self.tmpdirname , _a ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_a ) + """\n""" ) with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_a ) + """\n""" ) # load decoder from hub _A : str = """hf-internal-testing/ngram-beam-search-decoder""" def a__ ( self , **_a ) -> Union[str, Any]: _A : str = self.add_kwargs_tokens_map.copy() kwargs.update(_a ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_a ) def a__ ( self , **_a ) -> Dict: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_a ) def a__ ( self , **_a ) -> str: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_a ) def a__ ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def a__ ( self ) -> str: _A : List[Any] = self.get_tokenizer() _A : Optional[Any] = self.get_feature_extractor() _A : List[str] = self.get_decoder() _A : List[str] = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) processor.save_pretrained(self.tmpdirname ) _A : List[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _a ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _a ) def a__ ( self ) -> Union[str, Any]: _A : List[Any] = WavaVecaProcessorWithLM( 
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match _A : int = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def a__ ( self ) -> Optional[Any]: _A : Dict = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(_a , """include""" ): WavaVecaProcessorWithLM( tokenizer=_a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def a__ ( self ) -> List[str]: _A : str = self.get_feature_extractor() _A : Tuple = self.get_tokenizer() _A : List[Any] = self.get_decoder() _A : Any = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) _A : Tuple = floats_list((3, 1000) ) _A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ) _A : Tuple = processor(_a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a__ ( self ) -> str: _A : Any = self.get_feature_extractor() _A : List[str] = self.get_tokenizer() _A : List[Any] = self.get_decoder() _A : List[str] = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) _A : int = """This is a test string""" _A : List[str] = processor(text=_a ) _A : Union[str, Any] = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a__ ( self , _a=(2, 10, 16) , _a=77 ) -> Optional[int]: np.random.seed(_a ) return np.random.rand(*_a ) def a__ ( self ) -> List[str]: _A : Dict = self.get_feature_extractor() _A : List[Any] = self.get_tokenizer() _A : Optional[int] = self.get_decoder() _A : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) _A : List[Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 ) _A : Dict = processor.decode(_a ) _A : Optional[Any] = decoder.decode_beams(_a )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def a__ ( self , _a ) -> int: _A : int = self.get_feature_extractor() _A : Any = self.get_tokenizer() _A : int = self.get_decoder() _A : Tuple = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) _A : Union[str, Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: _A : Any = processor.batch_decode(_a ) else: with get_context(_a ).Pool() as pool: _A : Optional[int] = processor.batch_decode(_a , _a ) _A : Optional[int] = list(_a ) with get_context("""fork""" ).Pool() as p: _A : Tuple = decoder.decode_beams_batch(_a , _a ) _A , _A , _A : str = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_a , decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text ) self.assertListEqual(_a , decoded_processor.logit_score ) self.assertListEqual(_a , decoded_processor.lm_score ) def a__ ( self ) -> Optional[Any]: _A : Any = self.get_feature_extractor() _A : str = self.get_tokenizer() _A : int = self.get_decoder() _A : Dict = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) _A : List[Any] = self._get_dummy_logits() _A : Union[str, Any] = 15 _A : str = -20.0 _A : Optional[int] = -4.0 _A : Union[str, Any] = processor.batch_decode( _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , ) _A : str = decoded_processor_out.text _A : Dict = list(_a ) with get_context("""fork""" ).Pool() as pool: _A : Union[str, Any] = decoder.decode_beams_batch( _a , _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , ) _A : Optional[int] = [d[0][0] for d in decoded_decoder_out] _A : int = [d[0][2] for d in decoded_decoder_out] _A : Optional[int] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_a , _a ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _a ) self.assertTrue(np.array_equal(_a , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _a , atol=1e-3 ) ) self.assertTrue(np.array_equal(_a , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , _a , atol=1e-3 ) ) def a__ ( self ) -> str: _A : Any = self.get_feature_extractor() _A : Dict = self.get_tokenizer() _A : Any = self.get_decoder() _A : Any = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) _A : Dict = self._get_dummy_logits() _A : Any = 2.0 _A : int = 5.0 _A : Union[str, Any] = -20.0 _A : str = True _A : Optional[Any] = processor.batch_decode( _a , alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , ) _A : Union[str, Any] = decoded_processor_out.text _A : Tuple = list(_a ) decoder.reset_params( alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , ) with get_context("""fork""" ).Pool() as pool: _A : Optional[Any] = decoder.decode_beams_batch( _a , _a , ) _A : Dict = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_a , _a ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _a ) _A : List[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _a ) def a__ ( self ) -> Any: _A : Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _A : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] _A : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() _A : Any = os.listdir(_a ) _A : int = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test 
that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_a , _a ) def a__ ( self ) -> Dict: _A : int = snapshot_download("""hf-internal-testing/processor_with_lm""" ) _A : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(_a ) _A : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] _A : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() _A : Optional[int] = os.listdir(_a ) _A : List[str] = os.listdir(_a ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_a , _a ) def a__ ( self ) -> Optional[Any]: _A : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _A : Union[str, Any] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _A : List[str] = floats_list((3, 1000) ) _A : Union[str, Any] = processor_wavaveca(_a , return_tensors="""np""" ) _A : Any = processor_auto(_a , return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) _A : Tuple = self._get_dummy_logits() _A : List[str] = processor_wavaveca.batch_decode(_a ) _A : Any = processor_auto.batch_decode(_a ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def a__ ( self ) -> Dict: _A : Dict = self.get_feature_extractor() _A : Optional[int] = self.get_tokenizer() _A : int = self.get_decoder() _A : List[str] = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , ) @staticmethod def a__ ( _a , _a ) -> int: _A : List[Any] = [d[key] for d in offsets] return retrieved_list def a__ ( self ) -> int: _A : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _A : Optional[int] = self._get_dummy_logits()[0] _A : str = processor.decode(_a , output_word_offsets=_a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(_a , _a ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] ) def a__ ( self ) -> int: _A : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _A : Tuple = self._get_dummy_logits() _A : int = processor.batch_decode(_a , output_word_offsets=_a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(_a , _a ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(_a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text ) 
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def a__ ( self ) -> Tuple: import torch _A : Union[str, Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_a ) _A : List[Any] = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6000 ) ) _A : Optional[Any] = iter(_a ) _A : Any = next(_a ) _A : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) _A : Optional[int] = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train _A : Dict = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values with torch.no_grad(): _A : Dict = model(_a ).logits.cpu().numpy() _A : Optional[int] = processor.decode(logits[0] , output_word_offsets=_a ) _A : List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate _A : List[Any] = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] _A : Optional[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL""" # output words self.assertEqual(""" """.join(self.get_from_offsets(_a , """word""" ) ) , _a ) self.assertEqual(""" """.join(self.get_from_offsets(_a , """word""" ) ) , output.text ) # output times _A : Optional[Any] = torch.tensor(self.get_from_offsets(_a , """start_time""" ) ) _A : int = torch.tensor(self.get_from_offsets(_a , """end_time""" ) ) # fmt: off _A : Dict = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) _A : str = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) ) self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
26
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
26
1
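# Illustrative only (not part of the dataset row above): with the restored
# euclidean_gcd / euclidean_gcd_recursive definitions in scope, both variants
# agree on the same inputs.
assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6
assert euclidean_gcd(7, 13) == 1  # coprime inputs share only the divisor 1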
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
26
def lowerCAmelCase_(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
1
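# Illustrative only (not part of the dataset row above): n & (n - 1) clears the
# lowest set bit, so a positive n is a power of two exactly when the result is 0.
for n, expected in [(1, True), (2, True), (3, False), (16, True), (18, False)]:
    assert (n & (n - 1) == 0) is expected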
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowerCAmelCase_ ( snake_case_ ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A : str = k.replace(snake_case_,snake_case_ ) if k.startswith("""encoder""" ): _A : Optional[Any] = k.replace(""".attn""",""".self_attn""" ) _A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" ) elif k.startswith("""decoder""" ): _A : str = k.replace("""norm1""","""self_attn_layer_norm""" ) _A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" ) _A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" ) return k def lowerCAmelCase_ ( snake_case_ ): _A : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _A : str = sd.pop(snake_case_ ) _A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" ) assert new_k not in sd _A : Optional[int] = v _snake_case = ["START"] @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) _A : List[Any] = model["""model"""] _A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ ) _A : List[str] = BlenderbotForConditionalGeneration(snake_case_ ) _A : Tuple = m.model.state_dict().keys() _A : Any = [] _A : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A : Optional[int] = rename_state_dict_key(snake_case_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(snake_case_ ) m.model.load_state_dict(snake_case_,strict=snake_case_ ) m.half() m.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) _snake_case = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
26
1
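# Illustrative sketch only (not part of the dataset row): the PATTERNS table in
# the Blenderbot conversion sample above is meant to be applied cumulatively,
# each replace feeding the next (the _A assignments in the row drop the result).
# A minimal standalone version of that idea, with _rename as a hypothetical helper:
def _rename(key: str, patterns: list[list[str]]) -> str:
    for old, new in patterns:
        key = key.replace(old, new)  # keep the result, do not discard it
    return key

assert _rename("encoder.attention.q_lin.weight", [["attention", "attn"], ["q_lin", "q_proj"]]) == "encoder.attn.q_proj.weight"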
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( snake_case_ ): _A : Optional[int] = [] if isinstance(snake_case_,snake_case_ ): for v in tree.values(): shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_,(list, tuple) ): for t in tree: shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_,torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("""Not supported""" ) return shapes @torch.jit.ignore def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : int = [] for d in reversed(snake_case_ ): idx.append(flat_idx % d ) _A : List[str] = flat_idx // d return tuple(reversed(snake_case_ ) ) @torch.jit.ignore def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = None,snake_case_ = None,): # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(snake_case_ ) -> None: _A : List[str] = True for i in range(len(snake_case_ ) ): _A : List[str] = -1 * (i + 1) l[reversed_idx] &= tally _A : List[Any] = l[reversed_idx] if start_edges is None: _A : str = [s == 0 for s in start] reduce_edge_list(snake_case_ ) if end_edges is None: _A : Any = [e == (d - 1) for e, d in zip(snake_case_,snake_case_ )] reduce_edge_list(snake_case_ ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(snake_case_ ) == 0: return [()] elif len(snake_case_ ) == 1: return [(slice(start[0],end[0] + 1 ),)] _A : List[Tuple[slice, ...]] = [] _A : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(snake_case_,snake_case_ ): if s == e: path_list.append(slice(snake_case_,s + 1 ) ) else: break _A : Tuple[slice, ...] 
= tuple(snake_case_ ) _A : str = len(snake_case_ ) # start == end, and we're done if divergence_idx == len(snake_case_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _A : Any = start[divergence_idx] return tuple( path + (slice(snake_case_,sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :],[d - 1 for d in dims[divergence_idx + 1 :]],dims[divergence_idx + 1 :],start_edges=start_edges[divergence_idx + 1 :],end_edges=[True for _ in end_edges[divergence_idx + 1 :]],) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _A : Union[str, Any] = end[divergence_idx] return tuple( path + (slice(snake_case_,edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]],end[divergence_idx + 1 :],dims[divergence_idx + 1 :],start_edges=[True for _ in start_edges[divergence_idx + 1 :]],end_edges=end_edges[divergence_idx + 1 :],) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx],end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx],end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1,end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) _A : List[str] = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1,end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : Optional[int] = t.shape[:no_batch_dims] _A : Tuple = list(_flat_idx_to_idx(snake_case_,snake_case_ ) ) # _get_minimal_slice_set is inclusive _A : str = list(_flat_idx_to_idx(flat_end - 1,snake_case_ ) ) # Get an ordered list of slices to perform _A : int = _get_minimal_slice_set( snake_case_,snake_case_,snake_case_,) _A : Optional[int] = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ = False,snake_case_ = None,snake_case_ = False,): if not (len(snake_case_ ) > 0): raise ValueError("""Must provide at least one input""" ) _A : int = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )] _A : Tuple = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] ) def _prep_inputs(snake_case_ ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: _A : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) _A : Union[str, Any] = t.reshape(-1,*t.shape[no_batch_dims:] ) else: _A : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t _A : Dict[str, Any] = tensor_tree_map(_prep_inputs,snake_case_ ) _A : Tuple = None if _out is not None: _A : Optional[int] = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ),_out ) _A : Optional[Any] = 1 for d in orig_batch_dims: flat_batch_dim *= d _A : str = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(snake_case_ ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t _A : Any = 0 _A : List[str] = prepped_outputs for _ in range(snake_case_ ): # Chunk the input if not low_mem: _A : List[Any] = _select_chunk else: _A : str = partial( _chunk_slice,flat_start=snake_case_,flat_end=min(snake_case_,i + chunk_size ),no_batch_dims=len(snake_case_ ),) _A : Dict[str, Any] = tensor_tree_map(snake_case_,snake_case_ ) # Run the layer on the chunk _A : Tuple = layer(**snake_case_ ) # Allocate space for the output if out is None: _A : Optional[Any] = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ),snake_case_ ) # Put the chunk in its pre-allocated space if isinstance(snake_case_,snake_case_ ): def assign(snake_case_,snake_case_ ) -> None: for k, v in da.items(): if isinstance(snake_case_,snake_case_ ): assign(snake_case_,da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: _A : List[Any] = da[k] assign(snake_case_,snake_case_ ) elif isinstance(snake_case_,snake_case_ ): for xa, xa in zip(snake_case_,snake_case_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: _A : Tuple = xa elif isinstance(snake_case_,torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: _A : Optional[int] = output_chunk else: raise ValueError("""Not supported""" ) i += chunk_size _A : int = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ),snake_case_ ) return out class lowercase : def __init__( self , _a = 512 , ) -> str: _A : Union[str, Any] = max_chunk_size _A : Optional[int] = None _A : Optional[tuple] = None def 
a__ ( self , _a , _a , _a ) -> int: logging.info("""Tuning chunk size...""" ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size _A : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] _A : List[Any] = [c for c in candidates if c > min_chunk_size] _A : Optional[Any] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(_a ) -> bool: try: with torch.no_grad(): fn(*_a , chunk_size=_a ) return True except RuntimeError: return False _A : Optional[int] = 0 _A : str = len(_a ) - 1 while i > min_viable_chunk_size_index: _A : Optional[Any] = test_chunk_size(candidates[i] ) if not viable: _A : Union[str, Any] = (min_viable_chunk_size_index + i) // 2 else: _A : Union[str, Any] = i _A : Optional[Any] = (i + len(_a ) - 1) // 2 return candidates[min_viable_chunk_size_index] def a__ ( self , _a , _a ) -> bool: _A : Optional[Any] = True for aa, aa in zip(_a , _a ): assert type(_a ) == type(_a ) if isinstance(_a , (list, tuple) ): consistent &= self._compare_arg_caches(_a , _a ) elif isinstance(_a , _a ): _A : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda _a : x[0] )] _A : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda _a : x[0] )] consistent &= self._compare_arg_caches(_a , _a ) else: consistent &= aa == aa return consistent def a__ ( self , _a , _a , _a , ) -> int: _A : Tuple = True _A : tuple = tree_map(lambda _a : a.shape if isinstance(_a , torch.Tensor ) else a , _a , _a ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(_a ) _A : List[Any] = self._compare_arg_caches(self.cached_arg_data , _a ) else: # Otherwise, we can reuse the precomputed value _A : Tuple = False if not consistent: _A : int = self._determine_favorable_chunk_size( _a , _a , _a , ) _A : int = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
26
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
26
1
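# Illustrative only (not part of the dataset row): _flat_idx_to_idx in the
# chunk utilities above peels a flat index apart one dimension at a time; for
# row-major layouts this matches numpy.unravel_index, e.g. index 7 in a (3, 4) grid:
import numpy as np

assert tuple(int(i) for i in np.unravel_index(7, (3, 4))) == (1, 3)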
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , ) -> int: super().__init__() _A : List[Any] = value_function _A : Union[str, Any] = unet _A : str = scheduler _A : Union[str, Any] = env _A : Tuple = env.get_dataset() _A : Optional[int] = {} for key in self.data.keys(): try: _A : Any = self.data[key].mean() except: # noqa: E722 pass _A : Union[str, Any] = {} for key in self.data.keys(): try: _A : Optional[Any] = self.data[key].std() except: # noqa: E722 pass _A : str = env.observation_space.shape[0] _A : Union[str, Any] = env.action_space.shape[0] def a__ ( self , _a , _a ) -> Union[str, Any]: return (x_in - self.means[key]) / self.stds[key] def a__ ( self , _a , _a ) -> Optional[int]: return x_in * self.stds[key] + self.means[key] def a__ ( self , _a ) -> List[str]: if type(_a ) is dict: return {k: self.to_torch(_a ) for k, v in x_in.items()} elif torch.is_tensor(_a ): return x_in.to(self.unet.device ) return torch.tensor(_a , device=self.unet.device ) def a__ ( self , _a , _a , _a ) -> Optional[Any]: for key, val in cond.items(): _A : int = val.clone() return x_in def a__ ( self , _a , _a , _a , _a ) -> Optional[int]: _A : Union[str, Any] = x.shape[0] _A : Any = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model _A : Dict = torch.full((batch_size,) , _a , device=self.unet.device , dtype=torch.long ) for _ in range(_a ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models _A : List[Any] = self.value_function(x.permute(0 , 2 , 1 ) , _a ).sample _A : Optional[Any] = torch.autograd.grad([y.sum()] , [x] )[0] _A : Optional[int] = self.scheduler._get_variance(_a ) _A : List[Any] = torch.exp(0.5 * posterior_variance ) _A : Tuple = model_std * grad _A : Optional[Any] = 0 _A : Any = x.detach() _A : List[Any] = x + scale * grad _A : Any = self.reset_xa(_a , _a , self.action_dim ) _A : str = self.unet(x.permute(0 , 2 , 1 ) , _a ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg _A : Dict = self.scheduler.step(_a , _a , _a , predict_epsilon=_a )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) _A : str = self.reset_xa(_a , _a , self.action_dim ) _A : List[Any] = self.to_torch(_a ) return x, y def __call__( self , _a , _a=64 , _a=32 , _a=2 , _a=0.1 ) -> Any: # normalize the observations and create batch dimension _A : Any = self.normalize(_a , """observations""" ) _A : Optional[int] = obs[None].repeat(_a , axis=0 ) _A : List[str] = {0: self.to_torch(_a )} _A : int = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) _A : Optional[int] = randn_tensor(_a , device=self.unet.device ) _A : int = self.reset_xa(_a , _a , self.action_dim ) _A : int = self.to_torch(_a ) # run the diffusion process _A , _A : List[Any] = self.run_diffusion(_a , _a , _a , _a ) # sort output trajectories by value _A : Optional[int] = y.argsort(0 , descending=_a ).squeeze() _A : Tuple = x[sorted_idx] _A : Optional[int] = sorted_values[:, :, : self.action_dim] _A : Union[str, Any] = actions.detach().cpu().numpy() _A : Tuple = self.de_normalize(_a , key="""actions""" ) # select the action with the highest value if y is not None: _A : int = 0 
else: # if we didn't run value guiding, select a random action _A : Optional[Any] = np.random.randint(0 , _a ) _A : Any = denorm_actions[selected_index, 0] return denorm_actions
26
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
26
1
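# Illustrative usage sketch of the FNetConfig reconstructed above (the exact
# import path depends on where the module lives in the package):
# config = FNetConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2)
# assert config.hidden_act == "gelu_new"  # default carried over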
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( snake_case_,snake_case_ ): assert isinstance(snake_case_,snake_case_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""",[False, True] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Optional[int] = tmp_path / """cache""" _A : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _A : Dict = ParquetDatasetReader(snake_case_,cache_dir=snake_case_,keep_in_memory=snake_case_ ).read() _check_parquet_dataset(snake_case_,snake_case_ ) @pytest.mark.parametrize( """features""",[ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ],) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Union[str, Any] = tmp_path / """cache""" _A : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _A : Optional[int] = features.copy() if features else default_expected_features _A : Tuple = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) _A : Optional[int] = ParquetDatasetReader(snake_case_,features=snake_case_,cache_dir=snake_case_ ).read() _check_parquet_dataset(snake_case_,snake_case_ ) @pytest.mark.parametrize("""split""",[None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Dict = tmp_path / """cache""" _A : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _A : Optional[int] = ParquetDatasetReader(snake_case_,cache_dir=snake_case_,split=snake_case_ ).read() _check_parquet_dataset(snake_case_,snake_case_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""",[str, list] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): if issubclass(snake_case_,snake_case_ ): _A : str = parquet_path elif issubclass(snake_case_,snake_case_ ): _A : List[Any] = [parquet_path] _A : Tuple = tmp_path / """cache""" _A : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _A : str = ParquetDatasetReader(snake_case_,cache_dir=snake_case_ ).read() _check_parquet_dataset(snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=("train",) ): assert isinstance(snake_case_,snake_case_ ) for split in splits: _A : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert 
dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""",[False, True] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Optional[Any] = tmp_path / """cache""" _A : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _A : List[str] = ParquetDatasetReader( {"""train""": parquet_path},cache_dir=snake_case_,keep_in_memory=snake_case_ ).read() _check_parquet_datasetdict(snake_case_,snake_case_ ) @pytest.mark.parametrize( """features""",[ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ],) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Union[str, Any] = tmp_path / """cache""" _A : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _A : str = features.copy() if features else default_expected_features _A : List[Any] = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) _A : Optional[int] = ParquetDatasetReader({"""train""": parquet_path},features=snake_case_,cache_dir=snake_case_ ).read() _check_parquet_datasetdict(snake_case_,snake_case_ ) @pytest.mark.parametrize("""split""",[None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): if split: _A : List[str] = {split: parquet_path} else: _A : List[Any] = """train""" _A : Union[str, Any] = {"""train""": parquet_path, """test""": parquet_path} _A : str = tmp_path / """cache""" _A : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _A : Union[str, Any] = ParquetDatasetReader(snake_case_,cache_dir=snake_case_ ).read() _check_parquet_datasetdict(snake_case_,snake_case_,splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Dict = ParquetDatasetWriter(snake_case_,tmp_path / """foo.parquet""" ) assert writer.write() > 0 _A : List[str] = pq.ParquetFile(tmp_path / """foo.parquet""" ) _A : Dict = pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : int = str(shared_datadir / """test_image_rgb.jpg""" ) _A : Any = {"""image""": [image_path]} _A : Union[str, Any] = Features({"""image""": Image()} ) _A : Union[str, Any] = Dataset.from_dict(snake_case_,features=snake_case_ ) _A : str = ParquetDatasetWriter(snake_case_,tmp_path / """foo.parquet""" ) assert writer.write() > 0 _A : int = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _A : int = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ),streaming=snake_case_ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""",[ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ],) def lowerCAmelCase_ ( snake_case_,snake_case_ ): 
assert get_writer_batch_size(snake_case_ ) == expected
26
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
26
1
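# Illustrative only, assuming the restored harmonic_series helper above is in
# scope: only the first term is rendered as "1"; every later term is "1/k".
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
assert harmonic_series("") == []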
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _snake_case = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCAmelCase_ ( snake_case_ ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCAmelCase_ ( snake_case_,snake_case_ ): if args.student_type == "roberta": _A : List[str] = False elif args.student_type == "gpt2": _A : Optional[int] = False def lowerCAmelCase_ ( snake_case_,snake_case_ ): if args.student_type == "roberta": _A : str = False def lowerCAmelCase_ ( ): _A : Union[str, Any] = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""",action="""store_true""",help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""",type=snake_case_,required=snake_case_,help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""",type=snake_case_,required=snake_case_,help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""",) parser.add_argument( """--student_type""",type=snake_case_,choices=["""distilbert""", """roberta""", """gpt2"""],required=snake_case_,help="""The student type (DistilBERT, RoBERTa).""",) parser.add_argument("""--student_config""",type=snake_case_,required=snake_case_,help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""",default=snake_case_,type=snake_case_,help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""",choices=["""bert""", """roberta""", """gpt2"""],required=snake_case_,help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""",type=snake_case_,required=snake_case_,help="""The teacher model.""" ) parser.add_argument("""--temperature""",default=2.0,type=snake_case_,help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""",default=0.5,type=snake_case_,help="""Linear weight for the distillation loss. 
Must be >=0.""" ) parser.add_argument( """--alpha_mlm""",default=0.0,type=snake_case_,help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""",) parser.add_argument("""--alpha_clm""",default=0.5,type=snake_case_,help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""",default=0.0,type=snake_case_,help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""",default=0.0,type=snake_case_,help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""",action="""store_true""",help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""",default=0.15,type=snake_case_,help="""Proportion of tokens for which we need to make a prediction.""",) parser.add_argument("""--word_mask""",default=0.8,type=snake_case_,help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""",default=0.1,type=snake_case_,help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""",default=0.1,type=snake_case_,help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""",default=0.7,type=snake_case_,help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""",) parser.add_argument("""--token_counts""",type=snake_case_,help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""",action="""store_true""",help="""If true, compute the distillation loss only the [MLM] prediction distribution.""",) parser.add_argument( """--freeze_pos_embs""",action="""store_true""",help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""",) parser.add_argument( """--freeze_token_type_embds""",action="""store_true""",help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""",) parser.add_argument("""--n_epoch""",type=snake_case_,default=3,help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""",type=snake_case_,default=5,help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""",action="""store_false""",help="""If true, group sequences that have similar length into the same batch. 
Default is true.""",) parser.add_argument( """--gradient_accumulation_steps""",type=snake_case_,default=50,help="""Gradient accumulation for larger training batches.""",) parser.add_argument("""--warmup_prop""",default=0.05,type=snake_case_,help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""",default=0.0,type=snake_case_,help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""",default=5e-4,type=snake_case_,help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""",default=1e-6,type=snake_case_,help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""",default=5.0,type=snake_case_,help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""",default=0.02,type=snake_case_,help="""Random initialization range.""" ) parser.add_argument( """--fp16""",action="""store_true""",help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""",) parser.add_argument( """--fp16_opt_level""",type=snake_case_,default="""O1""",help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ),) parser.add_argument("""--n_gpu""",type=snake_case_,default=1,help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""",type=snake_case_,default=-1,help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""",type=snake_case_,default=56,help="""Random seed""" ) parser.add_argument("""--log_interval""",type=snake_case_,default=500,help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""",type=snake_case_,default=4000,help="""Checkpoint interval.""" ) _A : List[str] = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path,"""parameters.json""" ),"""w""" ) as f: json.dump(vars(snake_case_ ),snake_case_,indent=4 ) git_log(args.dump_path ) _A , _A , _A : List[Any] = MODEL_CLASSES[args.student_type] _A , _A , _A : Union[str, Any] = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A : Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A : List[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A : List[Any] = tokenizer.all_special_tokens.index(snake_case_ ) _A : List[Any] = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) _A : int = special_tok_ids _A : Dict = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file,"""rb""" ) as fp: _A : List[Any] = pickle.load(snake_case_ ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts,"""rb""" ) as fp: _A : Optional[Any] = pickle.load(snake_case_ ) _A : Union[str, Any] = np.maximum(snake_case_,1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A : 
List[Any] = 0.0 # do not predict special tokens _A : List[str] = torch.from_numpy(snake_case_ ) else: _A : List[str] = None _A : int = LmSeqsDataset(params=snake_case_,data=snake_case_ ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) _A : Tuple = student_config_class.from_pretrained(args.student_config ) _A : Dict = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A : List[Any] = student_model_class.from_pretrained(args.student_pretrained_weights,config=snake_case_ ) else: _A : Optional[int] = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info("""Student loaded.""" ) # TEACHER # _A : Any = teacher_model_class.from_pretrained(args.teacher_name,output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_,snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_,snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A : Optional[int] = Distiller( params=snake_case_,dataset=snake_case_,token_probs=snake_case_,student=snake_case_,teacher=snake_case_ ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
26
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _snake_case = logging.get_logger(__name__) _snake_case = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) _snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCAmelCase_ ( snake_case_ ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _A : List[str] = model_type_to_module_name(snake_case_ ) _A : List[Any] = 
importlib.import_module(f'''.{module_name}''',"""transformers.models""" ) try: return getattr(snake_case_,snake_case_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A : List[Any] = importlib.import_module("""transformers""" ) if hasattr(snake_case_,snake_case_ ): return getattr(snake_case_,snake_case_ ) return None def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,): _A : Optional[int] = get_file_from_repo( snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case_,encoding="""utf-8""" ) as reader: return json.load(snake_case_ ) class lowercase : def __init__( self ) -> List[Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_a ) def a__ ( cls , _a , **_a ) -> Any: _A : Tuple = kwargs.pop("""config""" , _a ) _A : Tuple = kwargs.pop("""trust_remote_code""" , _a ) _A : List[Any] = True _A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a ) _A : Tuple = config_dict.get("""feature_extractor_type""" , _a ) _A : int = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): _A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_a , _a ): _A : int = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.feature_extractor_type`` _A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a ) if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: _A : Tuple = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _A : Optional[Any] = feature_extractor_class_from_name(_a ) _A : List[Any] = feature_extractor_auto_map is not None _A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING _A : Optional[int] = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _A : Dict = get_class_from_dynamic_module( _a , _a , **_a ) _A : str = kwargs.pop("""code_revision""" , _a ) if os.path.isdir(_a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_a , **_a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_a , **_a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING: _A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )] return feature_extractor_class.from_dict(_a , **_a ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a__ ( _a , _a ) -> Optional[int]: FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
26
1
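# Illustrative only: the AutoFeatureExtractor resolution logic in the row above
# is what backs the public one-liner (network access required, hence commented):
# from transformers import AutoFeatureExtractor
# extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")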
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def lowerCAmelCase_(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
26
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
26
1
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
26
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    # ReLU clamps every negative entry to zero and leaves the rest unchanged.
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
26
1
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Target attribute names follow the SpeechT5HifiGan module layout
    # (conv_pre / upsampler / resblocks / conv_post).
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
26
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _snake_case = getLogger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,): _A : Dict = str(snake_case_ ) assert local_rank is not None torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ ) _A : Tuple = Path(snake_case_ ) _A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(snake_case_ ) _A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda() if fpaa: _A : Any = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params _A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _A : int = num_return_sequences _A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: _A : Optional[int] = tokenizer.model_max_length if prefix is None: _A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """""" _A : Optional[int] = SeqaSeqDataset( snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ ) _A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn ) _A : Optional[Any] = [] for batch in tqdm(snake_case_ ): _A : Tuple = model.generate( input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,) _A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ ) _A : Dict = batch["""ids"""] if num_return_sequences > 1: _A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case_ ): results.append({"""pred""": pred, """id""": ids[i].item()} ) save_json(snake_case_,snake_case_ ) return results, sampler.num_replicas def lowerCAmelCase_ ( ): _A : Tuple = argparse.ArgumentParser( epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" ) parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" ) parser.add_argument( """--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",) parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" ) parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ ) parser.add_argument( """--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" ) parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" ) parser.add_argument( """--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" ) parser.add_argument( """--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" ) parser.add_argument( """--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" ) parser.add_argument( """--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",) parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ ) parser.add_argument( """--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" ) parser.add_argument("""--fp16""",action="""store_true""" ) parser.add_argument("""--debug""",action="""store_true""" ) _A : Union[str, Any] = time.time() _A , _A : List[str] = parser.parse_known_args() _A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) _A : Dict = Path(args.save_dir + """_tmp""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking. 
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    # Concatenate the per-rank record lists, sort by example id, and return the predictions.
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
26
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
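# Usage sketch for the tokenizer above (illustrative, not part of the original module).
# It assumes network access to the "distilbert-base-uncased" checkpoint on the Hub.
#
#   from transformers import DistilBertTokenizerFast
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   ids = tok.encode("Hello world!", add_special_tokens=False)
#   with_special = tok.build_inputs_with_special_tokens(ids)        # [CLS] ... [SEP]
#   token_types = tok.create_token_type_ids_from_sequences(ids)     # all zeros for a single sequence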
26
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
26
1
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
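# Usage sketch for the pipeline above (illustrative, not part of the original script).
# It builds a toy `datasets.Dataset` with the "content"/"repo_name"/"path" columns the
# helpers expect; the row values are made up, and rows this short fall under
# MIN_NUM_TOKENS, so real inputs need longer files to actually form clusters.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict(
#       {
#           "content": ["def f():\n    return 1"] * 2 + ["print('hello')"],
#           "repo_name": ["repo_a", "repo_b", "repo_c"],
#           "path": ["f.py", "f_copy.py", "hello.py"],
#       }
#   )
#   ds_filter, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)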
26
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class lowercase ( UpperCamelCase__ ): _a = 42 class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]: super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str: if latents is None: _A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A : Union[str, Any] = latents.to(_a ) _A : int = latents * scheduler.init_noise_sigma return latents def a__ ( self , _a=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A : str = torch.device(F'''cuda:{gpu_id}''' ) _A : Any = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def a__ ( self ) -> List[Any]: if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def a__ ( self , _a , _a , _a , _a , ) -> Tuple: if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): _A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): _A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _A : int = image.to(dtype=self.image_encoder.dtype , device=_a ) _A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""] _A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A : Dict = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: _A : str = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A : List[str] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]: if isinstance(_a , PIL.Image.Image ): _A : List[Any] = 1 elif isinstance(_a , torch.Tensor ): _A : Any = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A : Union[str, Any] = len(_a ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) _A : Optional[int] = self._execution_device _A : Tuple = batch_size * num_images_per_prompt _A : List[Any] = guidance_scale > 1.0 _A : Optional[Any] = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) _A : Optional[int] = self.scheduler.timesteps _A : List[str] = self.prior.config.num_embeddings _A : int = self.prior.config.embedding_dim _A : Optional[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A : List[Any] = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : int = self.scheduler.scale_model_input(_a , _a ) _A : Tuple = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance _A , _A : Optional[Any] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _A , _A : Dict = noise_pred.chunk(2 ) _A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A : int = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) _A : List[str] = [] for i, latent in enumerate(_a ): print() _A : List[str] = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) _A : List[Any] = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _A : List[str] = images.cpu().numpy() if output_type == "pil": _A : List[Any] = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
26
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
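# Usage sketch (illustrative, not part of the original file): the config is exposed
# through the top-level transformers package, and the values checked below are the
# defaults from the signature above.
#
#   from transformers import FNetConfig
#   config = FNetConfig()
#   assert config.model_type == "fnet"
#   assert config.hidden_size == 768 and config.num_hidden_layers == 12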
26
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ ): print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ): _A : Union[str, Any] = [] for k, v in d.items(): _A : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(snake_case_,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() ) else: items.append((new_key, v) ) return dict(snake_case_ ) _A : List[Any] = argparse.Namespace() with open(snake_case_,"""r""" ) as yaml_file: try: _A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader ) _A : Optional[int] = flatten_yaml_as_dict(snake_case_ ) for k, v in flat_cfg.items(): setattr(snake_case_,snake_case_,snake_case_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) ) return config def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = MobileViTVaConfig() _A : Tuple = False # dataset if task_name.startswith("""imagenet1k_""" ): _A : Dict = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : int = 384 else: _A : int = 256 _A : List[str] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _A : Union[str, Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _A : str = 384 else: _A : List[Any] = 256 _A : List[str] = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _A : int = 151 _A : int = 512 _A : Optional[int] = """ade20k-id2label.json""" _A : Any = True elif task_name.startswith("""voc_""" ): _A : List[Any] = 21 _A : Dict = 512 _A : Dict = """pascal-voc-id2label.json""" _A : int = True # orig_config _A : Any = load_orig_config_file(snake_case_ ) assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model" _A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 ) assert ( getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 ) if "_deeplabv3" in task_name: _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] ) _A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 ) _A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 ) # id2label _A : List[Any] = """huggingface/label-files""" _A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : str = {int(snake_case_ ): v for k, v in idalabel.items()} _A : str = idalabel _A : Dict = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : Any = 
dct.pop(snake_case_ ) _A : Union[str, Any] = val def lowerCAmelCase_ ( snake_case_,snake_case_=False ): if base_model: _A : Optional[int] = """""" else: _A : Dict = """mobilevitv2.""" _A : int = [] for k in state_dict.keys(): if k[:8] == "encoder.": _A : Any = k[8:] else: _A : List[str] = k if ".block." in k: _A : Any = k_new.replace(""".block.""",""".""" ) if ".conv." in k: _A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" ) if ".norm." in k: _A : Any = k_new.replace(""".norm.""",""".normalization.""" ) if "conv_1." in k: _A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: _A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" ) if ".red_1x1." in k: _A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: _A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: _A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: _A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: _A : Optional[int] = [0, 1] elif i == 4: _A : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _A : Optional[Any] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: _A : Union[str, Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: _A : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: _A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" ) if "pre_norm_attn.1." in k: _A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" ) if "pre_norm_ffn.0." in k: _A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" ) if "pre_norm_ffn.1." in k: _A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" ) if "classifier.1." in k: _A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" ) if "seg_head." in k: _A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" ) if ".aspp_layer." in k: _A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" ) if ".aspp_pool." 
in k: _A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case_ ) for k in keys_to_ignore: state_dict.pop(snake_case_,snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ): _A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ ) # load original state_dict _A : Tuple = torch.load(snake_case_,map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval() _A : str = False else: _A : int = MobileViTVaForImageClassification(snake_case_ ).eval() _A : List[Any] = False # remove and rename some keys of load the original model _A : List[Any] = checkpoint remove_unused_keys(snake_case_ ) _A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) # load modified state_dict model.load_state_dict(snake_case_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 ) _A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" ) _A : Optional[Any] = model(**snake_case_ ) # verify classification model if task_name.startswith("""imagenet""" ): _A : List[Any] = outputs.logits _A : Optional[int] = logits.argmax(-1 ).item() print("""Predicted class:""",model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) _snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
26
1
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = VideoToVideoSDPipeline _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} _a = PipelineTesterMixin.required_optional_params - {"latents"} _a = False # No `output_type`. _a = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def a__ ( self ) -> Optional[Any]: torch.manual_seed(0 ) _A : Tuple = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) _A : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , ) torch.manual_seed(0 ) _A : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _A : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) _A : Optional[Any] = CLIPTextModel(_a ) _A : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _A : List[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def a__ ( self , _a , _a=0 ) -> List[Any]: # 3 frames _A : List[str] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("""mps""" ): _A : Optional[int] = torch.manual_seed(_a ) else: _A : Any = torch.Generator(device=_a ).manual_seed(_a ) _A : Optional[int] = { """prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def a__ ( self ) -> int: _A : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator _A : str = self.get_dummy_components() _A : Optional[int] = VideoToVideoSDPipeline(**_a ) _A : List[Any] = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) _A : int = self.get_dummy_inputs(_a ) _A : Optional[int] = """np""" _A : int = sd_pipe(**_a ).frames _A : Union[str, Any] = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) _A : Dict = np.array([106, 
117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def a__ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a , expected_max_diff=5e-3 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def a__ ( self ) -> Tuple: pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def a__ ( self ) -> Dict: pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def a__ ( self ) -> List[Any]: pass def a__ ( self ) -> List[str]: return super().test_progress_bar() @slow @skip_mps class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: _A : Union[str, Any] = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames _A : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) _A : int = torch.randn((1, 10, 3, 1024, 576) , generator=_a ) _A : int = video.to("""cuda""" ) _A : Optional[Any] = """Spiderman is surfing""" _A : List[Any] = pipe(_a , video=_a , generator=_a , num_inference_steps=3 , output_type="""pt""" ).frames _A : Dict = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
26
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowercase ( UpperCamelCase__ ): _a = (DPMSolverSDEScheduler,) _a = 1_0 def a__ ( self , **_a ) -> Optional[Any]: _A : str = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**_a ) return config def a__ ( self ) -> Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def a__ ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def a__ ( self ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def a__ ( self ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def a__ ( self ) -> Optional[int]: _A : Any = self.scheduler_classes[0] _A : List[str] = self.get_scheduler_config() _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Dict = self.dummy_model() _A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Dict = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : str = model(_a , _a ) _A : List[Any] = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Dict = torch.sum(torch.abs(_a ) ) _A : Dict = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Optional[Any]: _A : Dict = self.scheduler_classes[0] _A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) _A : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) _A : Tuple = self.dummy_model() _A : int = self.dummy_sample_deter * scheduler.init_noise_sigma _A : Tuple = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): _A : int = scheduler.scale_model_input(_a , _a ) _A : Tuple = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Optional[int] = output.prev_sample _A : Optional[Any] = torch.sum(torch.abs(_a ) ) _A : List[Any] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def a__ ( self ) -> List[str]: _A : Union[str, Any] = self.scheduler_classes[0] _A : List[Any] = self.get_scheduler_config() _A : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Union[str, Any] = self.dummy_model() _A : Optional[Any] = self.dummy_sample_deter.to(_a ) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: _A : int = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : Dict = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : str = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def a__ ( self ) -> Union[str, Any]: _A : List[Any] = self.scheduler_classes[0] _A : Optional[Any] = self.get_scheduler_config() _A : int = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) _A : Optional[Any] = self.dummy_model() _A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma _A : str = sample.to(_a ) for t in scheduler.timesteps: _A : Optional[int] = scheduler.scale_model_input(_a , _a ) _A : List[Any] = model(_a , _a ) _A : Dict = scheduler.step(_a , _a , _a ) _A : List[str] = output.prev_sample _A : str = torch.sum(torch.abs(_a ) ) _A : List[str] = torch.mean(torch.abs(_a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
26
1
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _snake_case = logging.getLogger(__name__) _snake_case = "Hello world! cécé herlolip" _snake_case = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Optional[Any] = BertAbsConfig( temp_dir=""".""",finetune_bert=snake_case_,large=snake_case_,share_emb=snake_case_,use_bert_emb=snake_case_,encoder="""bert""",max_pos=512,enc_layers=6,enc_hidden_size=512,enc_heads=8,enc_ff_size=512,enc_dropout=0.2,dec_layers=6,dec_hidden_size=768,dec_heads=8,dec_ff_size=2048,dec_dropout=0.2,) _A : Optional[int] = torch.load(snake_case_,lambda snake_case_,snake_case_ : storage ) _A : Optional[int] = AbsSummarizer(snake_case_,torch.device("""cpu""" ),snake_case_ ) original.eval() _A : List[str] = BertAbsSummarizer(snake_case_,torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) _A : List[str] = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs _A : List[Any] = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) _A : int = torch.tensor(snake_case_ ).unsqueeze(0 ) _A : Any = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) _A : List[str] = torch.tensor(snake_case_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass _A : List[str] = encoder_input_ids _A : int = decoder_input_ids _A : Tuple = None _A : List[Any] = None _A : Tuple = None _A : Tuple = None _A : List[str] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical _A : List[str] = original(snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ )[0] _A : List[str] = original.generator(snake_case_ ) _A : str = new_model( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ )[0] _A : Optional[Any] = new_model.generator(snake_case_ ) _A : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(snake_case_ ) ) _A : str = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(snake_case_ ) ) _A : Optional[Any] = torch.allclose(snake_case_,snake_case_,atol=1e-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict(),"""./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) _snake_case = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
    _a = 1

    @register_to_config
    def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]:
        _A : Dict = None
        _A : List[Any] = None
        _A : Dict = None

    def a__ ( self , _a , _a = None ) -> Union[str, Any]:
        _A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )

    def a__ ( self , _a , _a , _a , _a=None ) -> Dict:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        _A : Any = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        _A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        _A : List[str] = std.flatten()
        while len(std.shape ) < len(score.shape ):
            _A : List[Any] = std.unsqueeze(-1 )
        _A : int = -score / std

        # compute
        _A : Tuple = -1.0 / len(self.timesteps )
        _A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        _A : List[str] = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            _A : Union[str, Any] = beta_t.unsqueeze(-1 )
        _A : Tuple = -0.5 * beta_t * x

        _A : Tuple = torch.sqrt(_a )
        _A : Dict = drift - diffusion**2 * score
        _A : Dict = x + drift * dt

        # add noise
        _A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
        _A : str = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ) -> Optional[Any]:
        return self.config.num_train_timesteps
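# The step method above is one Euler–Maruyama update of the reverse-time VP SDE,
# dx = [-1/2 beta(t) x - beta(t) score] dt + sqrt(beta(t)) dw. The obfuscation gave
# both methods the name a__; upstream they map to set_timesteps/step_pred, which is an
# assumption here. A self-contained sketch of the same arithmetic, with a random
# stand-in for the score model:
import math
import torch

beta_min, beta_max, sampling_eps, N = 0.1, 20.0, 1e-3, 1000
timesteps = torch.linspace(1, sampling_eps, N)
dt = -1.0 / N

x = torch.randn(4)  # toy sample
for t in timesteps:
    # undo the marginal std on the model output, as the scheduler does
    log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
    std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
    score = -torch.randn_like(x) / std  # stand-in for -model(x, t) / std
    beta_t = beta_min + t * (beta_max - beta_min)
    drift = -0.5 * beta_t * x - beta_t * score
    x_mean = x + drift * dt
    x = x_mean + math.sqrt(beta_t) * math.sqrt(-dt) * torch.randn_like(x)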
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _snake_case = { "configuration_xmod": [ "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig", "XmodOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", "XmodForQuestionAnswering", "XmodForSequenceClassification", "XmodForTokenClassification", "XmodModel", "XmodPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
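# The _import_structure/_LazyModule registration above defers the heavy torch import
# until an Xmod class is actually accessed. A minimal self-contained sketch of the
# same idea using a PEP 562 module-level __getattr__ (json.dumps is a stand-in symbol
# chosen so the snippet runs anywhere):
import importlib
import sys

_import_structure = {"json": ["dumps"]}

def __getattr__(name):
    # resolve the owning module lazily on first attribute access
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

# attribute access on the module object triggers the import on first use:
print(getattr(sys.modules[__name__], "dumps")({"lazy": True}))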
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } _snake_case = { "google/fnet-base": 512, "google/fnet-large": 512, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ["input_ids", "token_type_ids"] _a = FNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _A : int = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) _A : Optional[int] = do_lower_case _A : List[Any] = remove_space _A : str = keep_accents _A : int = vocab_file _A : int = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None ) -> List[int]: _A : Any = [self.sep_token_id] _A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : List[str] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
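# The two methods above encode FNet's pair layout, [CLS] A [SEP] (+ B [SEP]), with
# token_type_ids 0 over the first segment and 1 over the second. A pure-Python check
# of that layout (101/102 are placeholder ids, not necessarily FNet's real ones):
cls, sep = [101], [102]
token_ids_a, token_ids_b = [7, 8, 9], [4, 5]

with_special_tokens = cls + token_ids_a + sep + token_ids_b + sep
token_type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

assert len(with_special_tokens) == len(token_type_ids)
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]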
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a = True , _a = None , _a = 32 , _a = True , _a = 1 / 255 , _a = True , _a = True , _a = [0.48145466, 0.4578275, 0.40821073] , _a = [0.26862954, 0.26130258, 0.27577711] , _a = True , _a=7 , _a=30 , _a=400 , _a=3 , ) -> List[Any]: _A : List[str] = parent _A : Any = do_resize _A : Tuple = size if size is not None else {"""shortest_edge""": 288} _A : Dict = size_divisor _A : Optional[Any] = do_rescale _A : int = rescale_factor _A : Dict = do_normalize _A : Union[str, Any] = do_center_crop _A : Any = image_mean _A : int = image_std _A : Optional[Any] = do_pad _A : List[str] = batch_size _A : Tuple = num_channels _A : Optional[int] = min_resolution _A : int = max_resolution def a__ ( self ) -> List[str]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def a__ ( self , _a , _a=False ) -> Optional[int]: if not batched: _A : Optional[int] = self.size["""shortest_edge"""] _A : Tuple = image_inputs[0] if isinstance(_a , Image.Image ): _A , _A : Dict = image.size else: _A , _A : Union[str, Any] = image.shape[1], image.shape[2] _A : int = size / min(_a , _a ) if h < w: _A , _A : int = size, scale * w else: _A , _A : Dict = scale * h, size _A : int = int((1333 / 800) * size ) if max(_a , _a ) > max_size: _A : Union[str, Any] = max_size / max(_a , _a ) _A : Any = newh * scale _A : Union[str, Any] = neww * scale _A , _A : str = int(newh + 0.5 ), int(neww + 0.5 ) _A , _A : Any = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: _A : Dict = [] for image in image_inputs: _A , _A : Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A : List[Any] = max(_a , key=lambda _a : item[0] )[0] _A : Dict = max(_a , key=lambda _a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = BridgeTowerImageProcessor if is_vision_available() else None def a__ ( self ) -> Any: _A : int = BridgeTowerImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> List[str]: _A : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """size_divisor""" ) ) def a__ ( self ) -> List[Any]: pass def a__ ( self ) -> Tuple: # Initialize image processor _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : 
Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _A , _A : Optional[Any] = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A : Tuple = image_processing(_a , return_tensors="""pt""" ).pixel_values _A , _A : int = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a__ ( self ) -> Tuple: # Initialize image processor _A : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _A , _A : Dict = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A : List[Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values _A , _A : Tuple = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a__ ( self ) -> Any: # Initialize image processor _A : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _A , _A : Any = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A : Optional[Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values _A , _A : str = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
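# get_expected_values above mirrors BridgeTower's resize rule: scale the shorter side
# to `shortest_edge`, cap the longer side at 1333/800 * shortest_edge, round, then
# floor both sides to a multiple of size_divisor. The same arithmetic standalone:
def expected_size(h, w, shortest_edge=288, size_divisor=32):
    scale = shortest_edge / min(h, w)
    newh, neww = (shortest_edge, scale * w) if h < w else (scale * h, shortest_edge)
    max_size = int((1333 / 800) * shortest_edge)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

assert expected_size(480, 640) == (288, 384)  # a 480x640 image with the defaults above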
from math import asin, atan, cos, radians, sin, sqrt, tan

_snake_case = 6_3_7_8_1_3_7.0
_snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5
_snake_case = 6378137


def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
    _A : Any = (AXIS_A - AXIS_B) / AXIS_A
    _A : Optional[int] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
    _A : List[str] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
    _A : Optional[Any] = radians(snake_case_ )
    _A : str = radians(snake_case_ )

    # Equation
    _A : Dict = sin((phi_a - phi_a) / 2 )
    _A : List[str] = sin((lambda_a - lambda_a) / 2 )

    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    _A : Optional[int] = sqrt(sin_sq_phi + (cos(snake_case_ ) * cos(snake_case_ ) * sin_sq_lambda) )

    return 2 * RADIUS * asin(snake_case_ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
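# The function above is the haversine distance computed on reduced (parametric)
# latitudes to correct for Earth's flattening; the obfuscation collapsed its four
# lat/lon parameters into a single name. A de-obfuscated sketch of the same
# computation (the variable names are mine):
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0       # equatorial radius in metres
AXIS_B = 6356752.314245  # polar radius in metres
RADIUS = 6378137

def haversine_distance(lat1, lon1, lat2, lon2):
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1, lambda_2 = radians(lon1), radians(lon2)
    sin_sq_phi = sin((phi_2 - phi_1) / 2) ** 2
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2) ** 2
    h = sqrt(sin_sq_phi + cos(phi_1) * cos(phi_2) * sin_sq_lambda)
    return 2 * RADIUS * asin(h)

# San Francisco -> New York City, roughly 4.1e6 metres
print(haversine_distance(37.774856, -122.424227, 40.713019, -74.012647))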
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _snake_case = 16 _snake_case = 32 def lowerCAmelCase_ ( snake_case_,snake_case_ = 16 ): _A : List[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _A : Any = load_dataset("""glue""","""mrpc""" ) def tokenize_function(snake_case_ ): # max_length=None => use the model max length (it's actually the default) _A : int = tokenizer(examples["""sentence1"""],examples["""sentence2"""],truncation=snake_case_,max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _A : Dict = datasets.map( snake_case_,batched=snake_case_,remove_columns=["""idx""", """sentence1""", """sentence2"""],) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _A : List[Any] = tokenized_datasets.rename_column("""label""","""labels""" ) def collate_fn(snake_case_ ): # On TPU it's best to pad everything to the same length or training will be very slow. _A : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _A : Any = 16 elif accelerator.mixed_precision != "no": _A : int = 8 else: _A : str = None return tokenizer.pad( snake_case_,padding="""longest""",max_length=snake_case_,pad_to_multiple_of=snake_case_,return_tensors="""pt""",) # Instantiate dataloaders. _A : Dict = DataLoader( tokenized_datasets["""train"""],shuffle=snake_case_,collate_fn=snake_case_,batch_size=snake_case_ ) _A : str = DataLoader( tokenized_datasets["""validation"""],shuffle=snake_case_,collate_fn=snake_case_,batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _snake_case = mocked_dataloaders # noqa: F811 def lowerCAmelCase_ ( snake_case_,snake_case_ ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""",snake_case_ ) == "1": _A : Optional[Any] = 2 # New Code # _A : str = int(args.gradient_accumulation_steps ) # Initialize accelerator _A : Dict = Accelerator( cpu=args.cpu,mixed_precision=args.mixed_precision,gradient_accumulation_steps=snake_case_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _A : int = config["""lr"""] _A : str = int(config["""num_epochs"""] ) _A : Tuple = int(config["""seed"""] ) _A : Optional[Any] = int(config["""batch_size"""] ) _A : int = evaluate.load("""glue""","""mrpc""" ) set_seed(snake_case_ ) _A , _A : Dict = get_dataloaders(snake_case_,snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _A : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""",return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _A : List[Any] = model.to(accelerator.device ) # Instantiate optimizer _A : Any = AdamW(params=model.parameters(),lr=snake_case_ ) # Instantiate scheduler _A : Optional[Any] = get_linear_schedule_with_warmup( optimizer=snake_case_,num_warmup_steps=100,num_training_steps=(len(snake_case_ ) * num_epochs),) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _A , _A , _A , _A , _A : List[Any] = accelerator.prepare( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(snake_case_ ): _A : List[Any] = model(**snake_case_ ) _A : Dict = output.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _A : Dict = model(**snake_case_ ) _A : Any = outputs.logits.argmax(dim=-1 ) _A , _A : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case_,references=snake_case_,) _A : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''',snake_case_ ) def lowerCAmelCase_ ( ): _A : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""",type=snake_case_,default=snake_case_,choices=["""no""", """fp16""", """bf16""", """fp8"""],help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""",) # New Code # parser.add_argument( """--gradient_accumulation_steps""",type=snake_case_,default=1,help="""The number of minibatches to be run before gradients are accumulated.""",) parser.add_argument("""--cpu""",action="""store_true""",help="""If passed, will train on the CPU.""" ) _A : Tuple = parser.parse_args() _A : List[str] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case_,snake_case_ ) if __name__ == "__main__": main()
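# A hypothetical launch of the script above (the script name and flag values are
# illustrative, not taken from this dump):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16
#
# With accumulation the optimizer steps once every gradient_accumulation_steps
# minibatches, so the effective batch size works out to:
per_device_batch_size = 16
gradient_accumulation_steps = 4
num_processes = 2  # e.g. two GPUs; an assumption for this example
effective_batch_size = per_device_batch_size * gradient_accumulation_steps * num_processes
assert effective_batch_size == 128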
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
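# The highway exits above allow inference to stop at an intermediate layer once its
# classifier is confident; DeeBERT measures confidence as the entropy of the exit
# logits. A minimal sketch of that gating rule (the entropy definition mirrors the
# one DeeBERT uses, written less efficiently via softmax; the threshold is a made-up
# example value):
import torch

def entropy(logits):
    # entropy of softmax(logits) along the last dimension
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)

threshold = 0.3
layer_logits = [torch.randn(1, 2) for _ in range(12)]  # stand-ins for per-layer exits
for layer, logits in enumerate(layer_logits, start=1):
    if entropy(logits).item() < threshold:
        print(f"exit at layer {layer} with prediction {logits.argmax(-1).item()}")
        break
else:
    print("ran all layers")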
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _snake_case = get_logger(__name__) _snake_case = Path(__file__).parent / "model_card_template.md" _snake_case = uuida().hex _snake_case = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES _snake_case = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES _snake_case = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/" def lowerCAmelCase_ ( snake_case_ = None ): _A : List[Any] = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""","""""" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case_,snake_case_ ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(snake_case_,snake_case_ ): ua += "; " + user_agent return ua def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = None ): if token is None: _A : Union[str, Any] = HfFolder.get_token() if organization is None: _A : Any = whoami(snake_case_ )["""name"""] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def lowerCAmelCase_ ( snake_case_,snake_case_ ): if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""" ) if hasattr(snake_case_,"""local_rank""" ) and args.local_rank not in [-1, 0]: return _A : Any = args.hub_token if hasattr(snake_case_,"""hub_token""" ) else None _A : int = get_full_repo_name(snake_case_,token=snake_case_ ) _A : Optional[Any] = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""",license="""apache-2.0""",library_name="""diffusers""",tags=[],datasets=args.dataset_name,metrics=[],),template_path=snake_case_,model_name=snake_case_,repo_name=snake_case_,dataset_name=args.dataset_name if hasattr(snake_case_,"""dataset_name""" ) else None,learning_rate=args.learning_rate,train_batch_size=args.train_batch_size,eval_batch_size=args.eval_batch_size,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case_,"""gradient_accumulation_steps""" ) else None ),adam_betaa=args.adam_betaa if hasattr(snake_case_,"""adam_beta1""" ) else None,adam_betaa=args.adam_betaa if 
hasattr(snake_case_,"""adam_beta2""" ) else None,adam_weight_decay=args.adam_weight_decay if hasattr(snake_case_,"""adam_weight_decay""" ) else None,adam_epsilon=args.adam_epsilon if hasattr(snake_case_,"""adam_epsilon""" ) else None,lr_scheduler=args.lr_scheduler if hasattr(snake_case_,"""lr_scheduler""" ) else None,lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case_,"""lr_warmup_steps""" ) else None,ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case_,"""ema_inv_gamma""" ) else None,ema_power=args.ema_power if hasattr(snake_case_,"""ema_power""" ) else None,ema_max_decay=args.ema_max_decay if hasattr(snake_case_,"""ema_max_decay""" ) else None,mixed_precision=args.mixed_precision,) _A : int = os.path.join(args.output_dir,"""README.md""" ) model_card.save(snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_ = None ): if resolved_file is None or commit_hash is not None: return commit_hash _A : int = str(Path(snake_case_ ).as_posix() ) _A : List[Any] = re.search(r"""snapshots/([^/]+)/""",snake_case_ ) if search is None: return None _A : int = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case_ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. _snake_case = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) _snake_case = os.path.join(hf_cache_home, "diffusers") def lowerCAmelCase_ ( snake_case_ = None,snake_case_ = None ): if new_cache_dir is None: _A : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: _A : str = old_diffusers_cache _A : Any = Path(snake_case_ ).expanduser() _A : Tuple = Path(snake_case_ ).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): _A : Union[str, Any] = new_cache_dir / old_blob_path.relative_to(snake_case_ ) new_blob_path.parent.mkdir(parents=snake_case_,exist_ok=snake_case_ ) os.replace(snake_case_,snake_case_ ) try: os.symlink(snake_case_,snake_case_ ) except OSError: logger.warning( """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _snake_case = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt") if not os.path.isfile(cache_version_file): _snake_case = 0 else: with open(cache_version_file) as f: try: _snake_case = int(f.read()) except ValueError: _snake_case = 0 if cache_version < 1: _snake_case = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " "existing cached models. This is a one-time operation, you can interrupt it or run it " "later by calling `diffusers.utils.hub_utils.move_cache()`." 
) try: move_cache() except Exception as e: _snake_case = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ "the directory exists and can be written to." ) def lowerCAmelCase_ ( snake_case_,snake_case_ = None ): if variant is not None: _A : List[str] = weights_name.split(""".""" ) _A : Dict = splits[:-1] + [variant] + splits[-1:] _A : Any = """.""".join(snake_case_ ) return weights_name def lowerCAmelCase_ ( snake_case_,*, snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_=None,): _A : Tuple = str(snake_case_ ) if os.path.isfile(snake_case_ ): return pretrained_model_name_or_path elif os.path.isdir(snake_case_ ): if os.path.isfile(os.path.join(snake_case_,snake_case_ ) ): # Load from a PyTorch checkpoint _A : Tuple = os.path.join(snake_case_,snake_case_ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case_,snake_case_,snake_case_ ) ): _A : List[Any] = os.path.join(snake_case_,snake_case_,snake_case_ ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case_ ).base_version ) >= version.parse("""0.20.0""" ) ): try: _A : List[Any] = hf_hub_download( snake_case_,filename=_add_variant(snake_case_,snake_case_ ),cache_dir=snake_case_,force_download=snake_case_,proxies=snake_case_,resume_download=snake_case_,local_files_only=snake_case_,use_auth_token=snake_case_,user_agent=snake_case_,subfolder=snake_case_,revision=revision or commit_hash,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''',snake_case_,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case_,snake_case_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case_,snake_case_ )}\' so that the correct variant file can be added.''',snake_case_,) try: # 2. 
Load model file as usual _A : Dict = hf_hub_download( snake_case_,filename=snake_case_,cache_dir=snake_case_,force_download=snake_case_,proxies=snake_case_,resume_download=snake_case_,local_files_only=snake_case_,use_auth_token=snake_case_,user_agent=snake_case_,subfolder=snake_case_,revision=revision or commit_hash,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""" ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' """this model name. Check the model page at """ f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
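# The _add_variant helper defined above splices a variant tag in front of the file
# extension. A quick check of its behavior against the diffusers default weight name:
weights_name = "diffusion_pytorch_model.bin"
splits = weights_name.split(".")
splits = splits[:-1] + ["fp16"] + splits[-1:]
assert ".".join(splits) == "diffusion_pytorch_model.fp16.bin"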
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xmod" def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _A : Tuple = vocab_size _A : Union[str, Any] = hidden_size _A : Dict = num_hidden_layers _A : Dict = num_attention_heads _A : List[Any] = hidden_act _A : Optional[Any] = intermediate_size _A : Any = hidden_dropout_prob _A : str = attention_probs_dropout_prob _A : Dict = max_position_embeddings _A : Any = type_vocab_size _A : List[Any] = initializer_range _A : int = layer_norm_eps _A : int = position_embedding_type _A : Any = use_cache _A : int = classifier_dropout _A : int = pre_norm _A : Optional[Any] = adapter_reduction_factor _A : List[Any] = adapter_layer_norm _A : Optional[int] = adapter_reuse_layer_norm _A : Any = ln_before_adapter _A : Union[str, Any] = list(_a ) _A : List[Any] = default_language class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class lowercase ( UpperCamelCase__ ): _a = "levit" def __init__( self , _a=224 , _a=3 , _a=3 , _a=2 , _a=1 , _a=16 , _a=[128, 256, 384] , _a=[4, 8, 12] , _a=[4, 4, 4] , _a=[16, 16, 16] , _a=0 , _a=[2, 2, 2] , _a=[2, 2, 2] , _a=0.02 , **_a , ) -> Any: super().__init__(**_a ) _A : List[Any] = image_size _A : Union[str, Any] = num_channels _A : Optional[Any] = kernel_size _A : Optional[int] = stride _A : int = padding _A : Optional[int] = hidden_sizes _A : List[str] = num_attention_heads _A : Tuple = depths _A : Any = key_dim _A : Optional[Any] = drop_path_rate _A : Tuple = patch_size _A : Tuple = attention_ratio _A : int = mlp_ratio _A : Any = initializer_range _A : Tuple = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class lowercase ( UpperCamelCase__ ): _a = version.parse("1.11" ) @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def a__ ( self ) -> float: return 1e-4
def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) if n == 0: return 0 _A : Tuple = float("""-inf""" ) for i in range(1,n + 1 ): _A : str = max( snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) ) return max_revue def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) _A : Dict = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _A : List[str] = float("""-inf""" ) for i in range(1,n + 1 ): _A : Optional[Any] = max( snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),) _A : Tuple = max_revenue return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): _enforce_args(snake_case_,snake_case_ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _A : Any = 0 for i in range(1,n + 1 ): _A : Optional[Any] = max_rev[i] for j in range(1,i + 1 ): _A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] ) _A : int = max_revenue_i return max_rev[n] def lowerCAmelCase_ ( snake_case_,snake_case_ ): if n < 0: _A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(snake_case_ ) if n > len(snake_case_ ): _A : Any = ( """Each integral piece of rod must have a corresponding price. """ f'''Got n = {n} but length of prices = {len(snake_case_ )}''' ) raise ValueError(snake_case_ ) def lowerCAmelCase_ ( ): _A : Tuple = [6, 10, 12, 15, 20, 23] _A : List[Any] = len(snake_case_ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _A : Any = 36 _A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ ) _A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ ) _A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
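# A worked example of the bottom-up recurrence above,
# max_rev[i] = max over 1 <= j <= i of (prices[j-1] + max_rev[i-j]),
# written out without the obfuscated names:
def bottom_up_cut_rod(prices, n):
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0  # a rod of length 0 yields no revenue
    for i in range(1, n + 1):
        max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
    return max_rev[n]

# classic CLRS prices: a rod of length 4 is best cut into 2 + 2 for revenue 5 + 5 = 10
assert bottom_up_cut_rod([1, 5, 8, 9], 4) == 10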
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json", "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json", "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json", "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json", "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json", "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json", "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json", "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json", "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json", "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "xlm" _a = { "hidden_size": "emb_dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", "n_words": "vocab_size", # For backward compatibility } def __init__( self , _a=3_0145 , _a=2048 , _a=12 , _a=16 , _a=0.1 , _a=0.1 , _a=True , _a=False , _a=False , _a=False , _a=1 , _a=True , _a=512 , _a=2048**-0.5 , _a=1e-12 , _a=0.02 , _a=0 , _a=1 , _a=2 , _a=3 , _a=5 , _a=True , _a="first" , _a=True , _a=None , _a=True , _a=0.1 , _a=5 , _a=5 , _a=0 , _a=0 , _a=2 , _a=0 , **_a , ) -> Optional[int]: _A : Optional[int] = vocab_size _A : Optional[Any] = emb_dim _A : Optional[int] = n_layers _A : Optional[int] = n_heads _A : List[str] = dropout _A : Optional[int] = attention_dropout _A : Optional[Any] = gelu_activation _A : Union[str, Any] = sinusoidal_embeddings _A : Union[str, Any] = causal _A : List[str] = asm _A : int = n_langs _A : List[Any] = use_lang_emb _A : Any = layer_norm_eps _A : str = bos_index _A : Union[str, Any] = eos_index _A : Optional[Any] = pad_index _A : Optional[int] = unk_index _A : str = mask_index _A : Tuple = is_encoder _A : Dict = max_position_embeddings _A : Tuple = embed_init_std _A : Optional[Any] = init_std _A : Tuple = summary_type _A : Optional[int] = summary_use_proj _A : Optional[Any] = summary_activation _A : Dict = summary_proj_to_labels _A : Union[str, Any] = summary_first_dropout _A : Tuple = start_n_top _A : int = end_n_top _A : Optional[Any] = mask_token_id _A : Union[str, Any] = lang_id if "n_words" in kwargs: _A : List[str] = kwargs["""n_words"""] super().__init__(pad_token_id=_a , bos_token_id=_a , **_a ) class lowercase ( UpperCamelCase__ ): @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : Any = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _A : str = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
import requests
from bsa import BeautifulSoup


def lowerCAmelCase_ ( snake_case_ = "AAPL" ):
    _A : str = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    _A : List[Any] = BeautifulSoup(requests.get(snake_case_ ).text,"""html.parser""" )
    _A : Union[str, Any] = """My(6px) Pos(r) smartphone_Mt(6px)"""

    return soup.find("""div""",class_=class_ ).find("""span""" ).text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")